| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-gemmlowp-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_gemmlowp__sse2(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
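// Illustrative example (not part of the original kernel): for scale = 0x1.0p-10f,
// scale_bits = 0x3A800000, so multiplier = 0x40000000 and shift = 9. Then
// ((value * 0x40000000) >> 31) >> 9 == value * 2**-10, matching the requested scale.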
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi16((short) qmin);
const __m128i vqmax = _mm_set1_epi16((short) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
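// The lines below compute abs(v) via the two's-complement identity
// (v ^ mask) - mask, where mask = (v < 0) ? -1 : 0. E.g. for x = -5:
// mask = 0xFFFFFFFF, (-5 ^ -1) = 4, and 4 - (-1) = 5.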
const __m128i x_abs = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
const __m128i y_abs = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
const __m128i z_abs = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
const __m128i w_abs = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);
const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
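// The remainder/threshold adjustment above rounds the arithmetic shift to
// nearest, with ties away from zero: PSRAD truncates toward -inf, and the
// PCMPGTD mask (all-ones == -1) is subtracted, i.e. +1 whenever the
// sign-adjusted remainder exceeds half the divisor. Illustrative example:
// q31product = 7, shift = 2: (7 >> 2) = 1, remainder = 3 > threshold = 1,
// so the result is 2 == round(7 / 4).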
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xy_clamped = _mm_max_epi16(_mm_min_epi16(xy_packed, vqmax), vqmin);
const __m128i zw_clamped = _mm_max_epi16(_mm_min_epi16(zw_packed, vqmax), vqmin);
const __m128i xyzw_clamped = _mm_packs_epi16(xy_clamped, zw_clamped);
// 16x PSHUFD
// 4x SHUFPS
// 8x PMULUDQ
// 8x PXOR (setzero)
// 12x PXOR
// 4x PAND
// 8x PADDQ
// 4x PADDD
// 8x PSUBQ
// 8x PSUBD
// 8x PSRLQ (immediate)
// 4x PSRAD (register)
// 12x PCMPGTD
// 2x PACKSSDW
// 2x PADDSW
// 2x PMAXSW
// 2x PMINSW
// 1x PACKSSWB
// ---------------------
// 113 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 9,770 | 50.973404 | 120 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-gemmlowp-sse41.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_gemmlowp__sse41(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_rev = _mm_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_rev = _mm_shuffle_epi32(y, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_rev = _mm_shuffle_epi32(z, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_rev = _mm_shuffle_epi32(w, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_product_even = _mm_add_epi64(_mm_mul_epi32(x, vmultiplier), vq31rounding);
const __m128i y_product_even = _mm_add_epi64(_mm_mul_epi32(y, vmultiplier), vq31rounding);
const __m128i z_product_even = _mm_add_epi64(_mm_mul_epi32(z, vmultiplier), vq31rounding);
const __m128i w_product_even = _mm_add_epi64(_mm_mul_epi32(w, vmultiplier), vq31rounding);
const __m128i x_product_odd = _mm_add_epi64(_mm_mul_epi32(x_rev, vmultiplier), vq31rounding);
const __m128i y_product_odd = _mm_add_epi64(_mm_mul_epi32(y_rev, vmultiplier), vq31rounding);
const __m128i z_product_odd = _mm_add_epi64(_mm_mul_epi32(z_rev, vmultiplier), vq31rounding);
const __m128i w_product_odd = _mm_add_epi64(_mm_mul_epi32(w_rev, vmultiplier), vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_product_even, 31);
const __m128i x_q31product_odd = _mm_add_epi64(x_product_odd, x_product_odd);
const __m128i y_q31product_even = _mm_srli_epi64(y_product_even, 31);
const __m128i y_q31product_odd = _mm_add_epi64(y_product_odd, y_product_odd);
const __m128i z_q31product_even = _mm_srli_epi64(z_product_even, 31);
const __m128i z_q31product_odd = _mm_add_epi64(z_product_odd, z_product_odd);
const __m128i w_q31product_even = _mm_srli_epi64(w_product_even, 31);
const __m128i w_q31product_odd = _mm_add_epi64(w_product_odd, w_product_odd);
const __m128i x_q31product = _mm_blend_epi16(x_q31product_even, x_q31product_odd, 0xCC);
const __m128i y_q31product = _mm_blend_epi16(y_q31product_even, y_q31product_odd, 0xCC);
const __m128i z_q31product = _mm_blend_epi16(z_q31product_even, z_q31product_odd, 0xCC);
const __m128i w_q31product = _mm_blend_epi16(w_q31product_even, w_q31product_odd, 0xCC);
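// Doubling the odd products (instead of shifting them right by 31) moves
// bits 31..62 into the upper 32 bits of each 64-bit lane, so the odd Q31
// results land in the high doublewords while the even results, after PSRLQ,
// sit in the low doublewords; PBLENDW with mask 0xCC then interleaves them
// without any further shuffling.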
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packs_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epi8(_mm_min_epi8(xyzw_packed, vqmax), vqmin);
// 4x PSHUFD
// 8x PMULDQ
// 12x PADDQ
// 4x PADDD
// 4x PSUBD
// 4x PSRLQ (immediate)
// 4x PSRAD (register)
// 4x PBLENDW
// 4x PAND
// 4x PXOR (setzero)
// 8x PCMPGTD
// 2x PACKSSDW
// 2x PADDSW
// 1x PACKSSWB
// 1x PMAXSB
// 1x PMINSB
// ---------------------
// 67 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 6,037 | 43.397059 | 120 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-gemmlowp-ssse3.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <tmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_gemmlowp__ssse3(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi16((short) qmin);
const __m128i vqmax = _mm_set1_epi16((short) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs = _mm_abs_epi32(x);
const __m128i y_abs = _mm_abs_epi32(y);
const __m128i z_abs = _mm_abs_epi32(z);
const __m128i w_abs = _mm_abs_epi32(w);
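// Unlike the SSE2 variant, SSSE3 provides PABSD (_mm_abs_epi32), so each
// absolute value above costs one instruction instead of an XOR/subtract pair.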
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);
const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xy_clamped = _mm_max_epi16(_mm_min_epi16(xy_packed, vqmax), vqmin);
const __m128i zw_clamped = _mm_max_epi16(_mm_min_epi16(zw_packed, vqmax), vqmin);
const __m128i xyzw_clamped = _mm_packs_epi16(xy_clamped, zw_clamped);
// 16x PSHUFD
// 4x SHUFPS
// 8x PMULUDQ
// 8x PXOR (setzero)
// 8x PXOR
// 4x PAND
// 8x PADDQ
// 4x PADDD
// 8x PSUBQ
// 4x PSUBD
// 8x PSRLQ (immediate)
// 4x PSRAD (register)
// 12x PCMPGTD
// 4x PABSD
// 2x PACKSSDW
// 2x PADDSW
// 2x PMAXSW
// 2x PMINSW
// 1x PACKSSWB
// ---------------------
// 109 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 9,630 | 49.957672 | 120 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-gemmlowp-wasmsimd.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_gemmlowp__wasmsimd(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t) (((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
const int64_t twice_multiplier = INT64_C(2) * (int64_t) multiplier;
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const v128_t vmultiplier = wasm_i64x2_make(twice_multiplier, twice_multiplier);
const v128_t vzero_point = wasm_i16x8_splat((int16_t) zero_point);
const v128_t vqmin = wasm_i8x16_splat(qmin);
const v128_t vqmax = wasm_i8x16_splat(qmax);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const v128_t vremainder_mask = wasm_i32x4_splat((int32_t) remainder_mask);
const v128_t vthreshold = wasm_i32x4_splat((int32_t) (remainder_mask >> 1));
const v128_t vtwice_q31rounding = wasm_i64x2_splat(INT64_C(0x80000000));
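// Note on the doubled constants: wasm_i64x2_mul produces a full 64-bit
// product, and using 2*multiplier together with the doubled rounding
// (2 * 2**30) places the Q31 result in the upper 32 bits of each lane, since
// (2*m*v + 2**31) >> 32 == (m*v + 2**30) >> 31. The upper halves are later
// gathered by the 1,3,5,7 shuffles.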
for (; n != 0; n -= 16) {
const v128_t x = wasm_v128_load(input);
const v128_t y = wasm_v128_load(input + 4);
const v128_t z = wasm_v128_load(input + 8);
const v128_t w = wasm_v128_load(input + 12);
input += 16;
const v128_t x_sign = wasm_i32x4_shr(x, 31);
const v128_t y_sign = wasm_i32x4_shr(y, 31);
const v128_t z_sign = wasm_i32x4_shr(z, 31);
const v128_t w_sign = wasm_i32x4_shr(w, 31);
const v128_t x_lo = wasm_v32x4_shuffle(x, x_sign, 0, 4, 1, 5);
const v128_t y_lo = wasm_v32x4_shuffle(y, y_sign, 0, 4, 1, 5);
const v128_t z_lo = wasm_v32x4_shuffle(z, z_sign, 0, 4, 1, 5);
const v128_t w_lo = wasm_v32x4_shuffle(w, w_sign, 0, 4, 1, 5);
const v128_t x_hi = wasm_v32x4_shuffle(x, x_sign, 2, 6, 3, 7);
const v128_t y_hi = wasm_v32x4_shuffle(y, y_sign, 2, 6, 3, 7);
const v128_t z_hi = wasm_v32x4_shuffle(z, z_sign, 2, 6, 3, 7);
const v128_t w_hi = wasm_v32x4_shuffle(w, w_sign, 2, 6, 3, 7);
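// Each shuffle above pairs a 32-bit element with its sign word, forming
// sign-extended 64-bit operands for i64x2.mul; e.g. x_lo holds
// sign_extend(x0) and sign_extend(x1).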
const v128_t x_product_lo = wasm_i64x2_add(wasm_i64x2_mul(x_lo, vmultiplier), vtwice_q31rounding);
const v128_t y_product_lo = wasm_i64x2_add(wasm_i64x2_mul(y_lo, vmultiplier), vtwice_q31rounding);
const v128_t z_product_lo = wasm_i64x2_add(wasm_i64x2_mul(z_lo, vmultiplier), vtwice_q31rounding);
const v128_t w_product_lo = wasm_i64x2_add(wasm_i64x2_mul(w_lo, vmultiplier), vtwice_q31rounding);
const v128_t x_product_hi = wasm_i64x2_add(wasm_i64x2_mul(x_hi, vmultiplier), vtwice_q31rounding);
const v128_t y_product_hi = wasm_i64x2_add(wasm_i64x2_mul(y_hi, vmultiplier), vtwice_q31rounding);
const v128_t z_product_hi = wasm_i64x2_add(wasm_i64x2_mul(z_hi, vmultiplier), vtwice_q31rounding);
const v128_t w_product_hi = wasm_i64x2_add(wasm_i64x2_mul(w_hi, vmultiplier), vtwice_q31rounding);
const v128_t x_q31product = wasm_v32x4_shuffle(x_product_lo, x_product_hi, 1, 3, 5, 7);
const v128_t y_q31product = wasm_v32x4_shuffle(y_product_lo, y_product_hi, 1, 3, 5, 7);
const v128_t z_q31product = wasm_v32x4_shuffle(z_product_lo, z_product_hi, 1, 3, 5, 7);
const v128_t w_q31product = wasm_v32x4_shuffle(w_product_lo, w_product_hi, 1, 3, 5, 7);
const v128_t x_remainder =
wasm_i32x4_add(wasm_v128_and(x_q31product, vremainder_mask), wasm_i32x4_shr(x_q31product, 31));
const v128_t y_remainder =
wasm_i32x4_add(wasm_v128_and(y_q31product, vremainder_mask), wasm_i32x4_shr(y_q31product, 31));
const v128_t z_remainder =
wasm_i32x4_add(wasm_v128_and(z_q31product, vremainder_mask), wasm_i32x4_shr(z_q31product, 31));
const v128_t w_remainder =
wasm_i32x4_add(wasm_v128_and(w_q31product, vremainder_mask), wasm_i32x4_shr(w_q31product, 31));
const v128_t x_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(x_q31product, shift), wasm_i32x4_gt(x_remainder, vthreshold));
const v128_t y_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(y_q31product, shift), wasm_i32x4_gt(y_remainder, vthreshold));
const v128_t z_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(z_q31product, shift), wasm_i32x4_gt(z_remainder, vthreshold));
const v128_t w_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(w_q31product, shift), wasm_i32x4_gt(w_remainder, vthreshold));
const v128_t xy_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(x_scaled, y_scaled), vzero_point);
const v128_t zw_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(z_scaled, w_scaled), vzero_point);
const v128_t xyzw_packed = wasm_i8x16_narrow_i16x8(xy_packed, zw_packed);
const v128_t xyzw_clamped = wasm_i8x16_min(wasm_i8x16_max(xyzw_packed, vqmin), vqmax);
// 12x v128.shuffle
// 12x i32x4.shr_s
// 8x i64x2.add
// 8x i64x2.mul
// 4x v128.and
// 4x i32x4.add
// 4x i32x4.sub
// 4x i32x4.gt
// 2x i16x8.narrow_i32x4_s
// 2x i16x8.add_saturate_s
// 1x i8x16.narrow_i16x8_s
// 1x i8x16.max_s
// 1x i8x16.min_s
// ---------------------
// 63 instructions total
wasm_v128_store(output, xyzw_clamped);
output += 16;
}
}
| 5,830 | 41.875 | 107 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__neon(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const int32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
#if defined(__aarch64__)
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
const int16x8_t vzero_point = vdupq_n_s16((int16_t) zero_point);
const int64x2_t vshift = vdupq_n_s64(-shift);
const int8x16_t vqmin = vdupq_n_s8(qmin);
const int8x16_t vqmax = vdupq_n_s8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0));
const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0));
const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0));
const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0));
#if defined(__aarch64__)
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif
#if defined(__aarch64__)
const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mask));
const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mask));
const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mask));
const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mask));
#else
const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
const int64x2_t x23_adjusted_product = vaddw_s32(x23_product, vreinterpret_s32_u32(vget_high_u32(x_neg_mask)));
const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
const int64x2_t y23_adjusted_product = vaddw_s32(y23_product, vreinterpret_s32_u32(vget_high_u32(y_neg_mask)));
const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
const int64x2_t z23_adjusted_product = vaddw_s32(z23_product, vreinterpret_s32_u32(vget_high_u32(z_neg_mask)));
const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
const int64x2_t w23_adjusted_product = vaddw_s32(w23_product, vreinterpret_s32_u32(vget_high_u32(w_neg_mask)));
#endif
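// Adding the negative mask (i.e. subtracting 1 for negative inputs) before
// the rounding shift converts VRSHL/SRSHL's round-half-up behaviour into
// round-half-away-from-zero, matching the scalar RNDNA variants.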
const int64x2_t x01_scaled = vrshlq_s64(x01_adjusted_product, vshift);
const int64x2_t x23_scaled = vrshlq_s64(x23_adjusted_product, vshift);
const int64x2_t y01_scaled = vrshlq_s64(y01_adjusted_product, vshift);
const int64x2_t y23_scaled = vrshlq_s64(y23_adjusted_product, vshift);
const int64x2_t z01_scaled = vrshlq_s64(z01_adjusted_product, vshift);
const int64x2_t z23_scaled = vrshlq_s64(z23_adjusted_product, vshift);
const int64x2_t w01_scaled = vrshlq_s64(w01_adjusted_product, vshift);
const int64x2_t w23_scaled = vrshlq_s64(w23_adjusted_product, vshift);
#ifdef __aarch64__
const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const int8x16_t xyzw_packed = vqmovn_high_s16(vqmovn_s16(xy_packed), zw_packed);
#else
const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const int8x16_t xyzw_packed = vcombine_s8(vqmovn_s16(xy_packed), vqmovn_s16(zw_packed));
#endif
const int8x16_t xyzw_clamped = vmaxq_s8(vminq_s8(xyzw_packed, vqmax), vqmin);
// AArch32 version:
// 4x VCLT.S32 Qd, Qm, #0
// 8x VMULL.S32 Qd, Dm, Dn
// 8x VADDW.S32 Qd, Qm, Dn
// 8x VRSHL.S64 Qd, Qm, Qn
// 8x VMOVN.S64 Dd, Qm
// 4x VQMOVN.S32 Dd, Qm
// 2x VQADD.S16 Qd, Qm, Qn
// 2x VQMOVN.S16 Dd, Qm
// 1x VMAX.S8 Qd, Qm, Qn
// 1x VMIN.S8 Qd, Qm, Qn
// ---------------------
// 46 instructions total
//
// AArch64 version:
// 4x CMLT Vd.4S, Vn.4S, #0
// 4x SMULL Vd.2D, Vn.2S, Vm.2S
// 4x SMULL2 Vd.2D, Vn.4S, Vm.4S
// 4x SADDW Vd.2D, Vn.2D, Vm.2S
// 4x SADDW2 Vd.2D, Vn.2D, Vm.4S
// 8x SRSHL Vd.2D, Vn.2D, Vm.2D
// 4x UZP1 Vd.4S, Vn.4S, Vm.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTN Vd.8B, Vn.8H
// 1x SQXTN2 Vd.16B, Vn.8H
// 1x SMIN Vd.16B, Vn.16B, Vm.16B
// 1x SMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 42 instructions total
vst1q_s8(output, xyzw_clamped);
output += 16;
}
}
| 8,065 | 47.590361 | 115 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-scalar-signed64.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__scalar_signed64(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const int64_t rounding = INT64_C(1) << (shift - 1);
const int32_t smin = (int32_t) qmin - (int32_t) zero_point;
const int32_t smax = (int32_t) qmax - (int32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute full 64-bit product of signed 32-bit factors.
//
// Note: multiplier can be treated as either signed or unsigned.
const int64_t x_product = (int64_t) x * (int64_t) multiplier;
const int64_t y_product = (int64_t) y * (int64_t) multiplier;
const int64_t z_product = (int64_t) z * (int64_t) multiplier;
const int64_t w_product = (int64_t) w * (int64_t) multiplier;
// Adjust product before subsequent shift with rounding up to simulate shift with rounding away from zero.
const int64_t x_adjusted_product = x_product - (int64_t)(x < 0);
const int64_t y_adjusted_product = y_product - (int64_t)(y < 0);
const int64_t z_adjusted_product = z_product - (int64_t)(z < 0);
const int64_t w_adjusted_product = w_product - (int64_t)(w < 0);
// Arithmetically shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up.
//
// Note that although rounding is precomputed, it is dependent on shift value, and on processors with 64-bit
// "right shift with rounding" instruction each line below can be represented by just one such instruction
// (e.g. VRSHL.S64 on ARM NEON, SRSHL in ARM64 Advanced SIMD).
const int32_t x_scaled = (int32_t) math_asr_s64(x_adjusted_product + rounding, shift);
const int32_t y_scaled = (int32_t) math_asr_s64(y_adjusted_product + rounding, shift);
const int32_t z_scaled = (int32_t) math_asr_s64(z_adjusted_product + rounding, shift);
const int32_t w_scaled = (int32_t) math_asr_s64(w_adjusted_product + rounding, shift);
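// Illustrative example (with small numbers, not part of the original kernel):
// for product = -3, shift = 1, rounding = 1, the unadjusted shift gives
// (-3 + 1) >> 1 = -1 (half rounded up), while the adjusted one gives
// (-3 - 1 + 1) >> 1 = -2, i.e. -1.5 rounded away from zero.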
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 127) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (int8_t) x_biased;
output[1] = (int8_t) y_biased;
output[2] = (int8_t) z_biased;
output[3] = (int8_t) w_biased;
output += 4;
}
}
| 3,964 | 41.180851 | 115 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned32.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__scalar_unsigned32(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
const uint32_t shift = 127 + 31 - (scale_bits >> 23);
assert(shift >= 32);
assert(shift < 64);
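// Illustrative example (not part of the original kernel): for
// scale = 0x1.0p-10f, scale_bits = 0x3A800000, so multiplier = 0x80000000
// and shift = 41; then (value * 2**31 + 2**40) >> 41 rounds value * 2**-10
// to the nearest integer.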
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const uint32_t rounding_hi = (uint32_t)(rounding >> 32);
const uint32_t rounding_lo = (uint32_t) rounding;
const uint32_t shift_minus_32 = shift - 32;
const int32_t smin = (int32_t) qmin - (int32_t) zero_point;
const int32_t smax = (int32_t) qmax - (int32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute absolute value of input as unsigned 32-bit int.
// All further computations will work with unsigned values to avoid undefined behaviour on signed operations.
const uint32_t x_abs = (x >= 0) ? (uint32_t) x : -(uint32_t) x;
const uint32_t y_abs = (y >= 0) ? (uint32_t) y : -(uint32_t) y;
const uint32_t z_abs = (z >= 0) ? (uint32_t) z : -(uint32_t) z;
const uint32_t w_abs = (w >= 0) ? (uint32_t) w : -(uint32_t) w;
// Compute full 64-bit product of 32-bit factors.
const uint64_t x_product = (uint64_t) x_abs * (uint64_t) multiplier;
const uint64_t y_product = (uint64_t) y_abs * (uint64_t) multiplier;
const uint64_t z_product = (uint64_t) z_abs * (uint64_t) multiplier;
const uint64_t w_product = (uint64_t) w_abs * (uint64_t) multiplier;
// Shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up (same as away from zero).
//
// Generally, this operation requires both 64-bit addition and 64-bit shift, but we use two tricks to replace
// 64-bit operations with 32-bit operations.
//
// To avoid full 64-bit addition we make use of three facts:
// - 64-bit rounding value added before the shift is a power of 2, and thus has only one bit set.
// - When 0x1.0p-1f <= scale < 1.0f, then shift == 32, the non-zero bit in rounding is in the low 32 bits, and
// rounding is exactly 0x80000000 (2**31), because rounding is 2**(shift-1). In this case,
// addition of rounding can affect high 32 bits of the product only through overflow, which happens if
// low 32-bit part of the product equals or exceeds 0x80000000. We can reformulate the latter condition
// as low 32-bit part of the product has the bit 31 set, and then overflow happens if both the low 32-bit part
// of the product and the low 32-bit part of the rounding value have bit 31 set. Since 32-bit numbers with the
// bit 31 set are negative when interpreted as signed integers, we can check the overflow condition as
// (int32_t) (LOW(product) & LOW(rounding)) < 0
// - When 0x1.0p-32f <= scale < 0x1.0p-1f, then shift > 32 and the non-zero bit is in the high 32 bits of rounding.
// We just need to do 32-bit addition of high 32 bits of rounding and high 32 bits of product. This addition never
// overflows because product <= 0x80000000 * 0xFFFFFF00 < 2**63 and rounding = 2**(shift-1) <= 2**62.
//
// To avoid full 64-bit shift, we leverage the fact that shift >= 32, and do it in two steps:
// - Shift by 32, which can be implemented by extracting the high 32-bit word on 32-bit systems.
// - Shift by (shift - 32), which can be implemented as a 32-bit shift of high word of addition result.
const uint32_t x_carry_lo = (uint32_t) ((int32_t) ((uint32_t) x_product & rounding_lo) < 0);
const uint32_t y_carry_lo = (uint32_t) ((int32_t) ((uint32_t) y_product & rounding_lo) < 0);
const uint32_t z_carry_lo = (uint32_t) ((int32_t) ((uint32_t) z_product & rounding_lo) < 0);
const uint32_t w_carry_lo = (uint32_t) ((int32_t) ((uint32_t) w_product & rounding_lo) < 0);
const uint32_t x_product_hi = (uint32_t) (x_product >> 32);
const uint32_t y_product_hi = (uint32_t) (y_product >> 32);
const uint32_t z_product_hi = (uint32_t) (z_product >> 32);
const uint32_t w_product_hi = (uint32_t) (w_product >> 32);
const uint32_t x_abs_scaled = (uint32_t) (x_product_hi + rounding_hi + x_carry_lo) >> shift_minus_32;
const uint32_t y_abs_scaled = (uint32_t) (y_product_hi + rounding_hi + y_carry_lo) >> shift_minus_32;
const uint32_t z_abs_scaled = (uint32_t) (z_product_hi + rounding_hi + z_carry_lo) >> shift_minus_32;
const uint32_t w_abs_scaled = (uint32_t) (w_product_hi + rounding_hi + w_carry_lo) >> shift_minus_32;
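// Illustrative example of the carry trick, assuming shift == 32 (i.e.
// 0x1.0p-1f <= scale < 1.0f): rounding_lo = 0x80000000, rounding_hi = 0 and
// shift_minus_32 = 0, so x_abs_scaled = x_product_hi + (1 if the low word of
// the product has bit 31 set), which equals (x_product + 0x80000000) >> 32.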
// Copy the sign of input to scaled absolute input value.
const int32_t x_scaled = (int32_t) (x >= 0 ? x_abs_scaled : -x_abs_scaled);
const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled);
const int32_t z_scaled = (int32_t) (z >= 0 ? z_abs_scaled : -z_abs_scaled);
const int32_t w_scaled = (int32_t) (w >= 0 ? w_abs_scaled : -w_abs_scaled);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 127) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (int8_t) x_biased;
output[1] = (int8_t) y_biased;
output[2] = (int8_t) z_biased;
output[3] = (int8_t) w_biased;
output += 4;
}
}
| 6,735 | 51.217054 | 116 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned64.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__scalar_unsigned64(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
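// Illustrative example (not part of the original kernel): for
// scale = 0x1.0p-10f, scale_bits = 0x3A800000, so multiplier = 0x00800000
// and shift = 33; then (value * 2**23 + 2**32) >> 33 rounds value * 2**-10
// to the nearest integer.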
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const int32_t smin = (int32_t) qmin - (int32_t) zero_point;
const int32_t smax = (int32_t) qmax - (int32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute absolute value of input as unsigned 32-bit int.
// All further computations will work with unsigned values to avoid undefined behaviour on signed operations.
const uint32_t x_abs = (x >= 0) ? (uint32_t) x : -(uint32_t) x;
const uint32_t y_abs = (y >= 0) ? (uint32_t) y : -(uint32_t) y;
const uint32_t z_abs = (z >= 0) ? (uint32_t) z : -(uint32_t) z;
const uint32_t w_abs = (w >= 0) ? (uint32_t) w : -(uint32_t) w;
// Compute full 64-bit product of 32-bit factors.
const uint64_t x_product = (uint64_t) x_abs * (uint64_t) multiplier;
const uint64_t y_product = (uint64_t) y_abs * (uint64_t) multiplier;
const uint64_t z_product = (uint64_t) z_abs * (uint64_t) multiplier;
const uint64_t w_product = (uint64_t) w_abs * (uint64_t) multiplier;
// Shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up (same as away from zero).
//
// Note that although rounding is precomputed, it is dependent on shift value, and on processors with 64-bit
// "right shift with rounding" instruction each line below can be represented by just one such instruction
// (e.g. VRSHL.U64 on ARM NEON, URSHL in ARM64 Advanced SIMD).
const uint32_t x_abs_scaled = (uint32_t) ((x_product + rounding) >> shift);
const uint32_t y_abs_scaled = (uint32_t) ((y_product + rounding) >> shift);
const uint32_t z_abs_scaled = (uint32_t) ((z_product + rounding) >> shift);
const uint32_t w_abs_scaled = (uint32_t) ((w_product + rounding) >> shift);
// Copy the sign of input to scaled absolute input value.
//
// On x86 processors with SSSE3 instruction set, this operation nicely maps to PSIGND instruction.
const int32_t x_scaled = (int32_t) (x >= 0 ? x_abs_scaled : -x_abs_scaled);
const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled);
const int32_t z_scaled = (int32_t) (z >= 0 ? z_abs_scaled : -z_abs_scaled);
const int32_t w_scaled = (int32_t) (w >= 0 ? w_abs_scaled : -w_abs_scaled);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 127) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (int8_t) x_biased;
output[1] = (int8_t) y_biased;
output[2] = (int8_t) z_biased;
output[3] = (int8_t) w_biased;
output += 4;
}
}
| 4,427 | 42.841584 | 115 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__sse2(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi16((short) qmin);
const __m128i vqmax = _mm_set1_epi16((short) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
const __m128i x_abs0123 = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
const __m128i y_abs0123 = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
const __m128i z_abs0123 = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
const __m128i w_abs0123 = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
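// The eight PMULUDQ above operate on absolute values because SSE2 lacks a
// signed 32x32->64-bit multiply (PMULDQ is SSE4.1); the sign is restored
// after scaling below.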
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
const __m128i x_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
const __m128i y_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
const __m128i z_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
const __m128i w_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
const __m128i x_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(x_abs_scaled02), _mm_castsi128_ps(x_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(y_abs_scaled02), _mm_castsi128_ps(y_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(z_abs_scaled02), _mm_castsi128_ps(z_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(w_abs_scaled02), _mm_castsi128_ps(w_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_abs_scaled = _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_abs_scaled = _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_abs_scaled = _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_abs_scaled = _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_scaled = _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask);
const __m128i y_scaled = _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask);
const __m128i z_scaled = _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask);
const __m128i w_scaled = _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xy_clamped = _mm_max_epi16(_mm_min_epi16(xy_packed, vqmax), vqmin);
const __m128i zw_clamped = _mm_max_epi16(_mm_min_epi16(zw_packed, vqmax), vqmin);
const __m128i xyzw_clamped = _mm_packs_epi16(xy_clamped, zw_clamped);
// 4x PXOR (setzero)
// 8x PSUBD
// 8x PXOR
// 8x PSHUFD
// 8x PMULUDQ
// 8x PSRLQ
// 8x PADDQ
// 4x SHUFPS
// 2x PACKSSDW
// 2x PADDSW
// 2x PMAXSW
// 2x PMINSW
// 1x PACKSSWB
// ---------------------
// 65 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 6,191 | 46.267176 | 117 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-sse41.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__sse41(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
const uint32_t shift = 127 + 31 - (scale_bits >> 23);
assert(shift >= 32);
assert(shift < 64);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
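// Illustrative example: for scale = 0x1.0p-8f (1/256), scale_bits = 0x3B800000, so
// multiplier = (0x3B800000 << 8) | 0x80000000 = 0x80000000 = 2^31 and
// shift = 127 + 31 - 119 = 39. The kernel then computes
// (|x| * 2^31 + 2^38) >> 39 = (|x| + 128) >> 8, i.e. |x| / 256 rounded to nearest,
// before the sign is restored.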
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshiftlo = _mm_cvtsi32_si128((int) shift);
const __m128i vshifthi = _mm_cvtsi32_si128((int) shift - 32);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs0123 = _mm_abs_epi32(x);
const __m128i y_abs0123 = _mm_abs_epi32(y);
const __m128i z_abs0123 = _mm_abs_epi32(z);
const __m128i w_abs0123 = _mm_abs_epi32(w);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshiftlo);
const __m128i x_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(x_absmul13, vrounding), vshifthi);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshiftlo);
const __m128i y_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(y_absmul13, vrounding), vshifthi);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshiftlo);
const __m128i z_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(z_absmul13, vrounding), vshifthi);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshiftlo);
const __m128i w_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(w_absmul13, vrounding), vshifthi);
const __m128i x_abs_scaled = _mm_blend_epi16(x_abs_scaled02, x_abs_scaled13, 0xCC);
const __m128i y_abs_scaled = _mm_blend_epi16(y_abs_scaled02, y_abs_scaled13, 0xCC);
const __m128i z_abs_scaled = _mm_blend_epi16(z_abs_scaled02, z_abs_scaled13, 0xCC);
const __m128i w_abs_scaled = _mm_blend_epi16(w_abs_scaled02, w_abs_scaled13, 0xCC);
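// Blend mask 0xCC = 0b11001100 takes 16-bit lanes 2-3 and 6-7 (i.e. 32-bit lanes 1
// and 3) from the second operand, merging the even-index quotients in lanes 0/2 of
// *_abs_scaled02 with the odd-index quotients left in lanes 1/3 by the 32-bit shift.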
const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packs_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epi8(_mm_min_epi8(xyzw_packed, vqmax), vqmin);
// 4x PABSD
// 4x PSHUFD
// 8x PMULUDQ
// 4x PSRLQ
// 4x PSRLD
// 8x PADDQ
// 4x PBLENDW
// 4x PSIGND
// 2x PACKSSDW
// 2x PADDSW
// 1x PACKSSWB
// 1x PMAXSB
// 1x PMINSB
// ---------------------
// 47 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 4,855 | 40.504274 | 97 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndna-ssse3.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <tmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndna__ssse3(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi16((short) qmin);
const __m128i vqmax = _mm_set1_epi16((short) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs0123 = _mm_abs_epi32(x);
const __m128i y_abs0123 = _mm_abs_epi32(y);
const __m128i z_abs0123 = _mm_abs_epi32(z);
const __m128i w_abs0123 = _mm_abs_epi32(w);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
const __m128i x_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
const __m128i y_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
const __m128i z_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
const __m128i w_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
const __m128i x_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(x_abs_scaled02), _mm_castsi128_ps(x_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(y_abs_scaled02), _mm_castsi128_ps(y_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(z_abs_scaled02), _mm_castsi128_ps(z_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(w_abs_scaled02), _mm_castsi128_ps(w_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_abs_scaled = _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_abs_scaled = _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_abs_scaled = _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_abs_scaled = _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xy_clamped = _mm_max_epi16(_mm_min_epi16(xy_packed, vqmax), vqmin);
const __m128i zw_clamped = _mm_max_epi16(_mm_min_epi16(zw_packed, vqmax), vqmin);
const __m128i xyzw_clamped = _mm_packs_epi16(xy_clamped, zw_clamped);
// 4x PABSD
// 8x PSHUFD
// 8x PMULUDQ
// 8x PSRLQ
// 8x PADDQ
// 4x SHUFPS
// 4x PSIGND
// 2x PACKSSDW
// 2x PADDSW
// 2x PMAXSW
// 2x PMINSW
// 1x PACKSSWB
// ---------------------
// 53 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 5,582 | 43.664 | 117 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndnu-neon-mull.c |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndnu__neon_mull(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const int32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
#if defined(__aarch64__)
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
const int16x8_t vzero_point = vdupq_n_s16((int16_t) zero_point);
const int64x2_t vshift = vdupq_n_s64(-shift);
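// NEON lacks a variable right-shift instruction, so the shift amount is negated:
// VRSHL.S64 / SRSHL with a negative count performs a rounding right shift by |shift|.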
const int8x16_t vqmin = vdupq_n_s8(qmin);
const int8x16_t vqmax = vdupq_n_s8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
#if defined(__aarch64__)
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif
const int64x2_t x01_scaled = vrshlq_s64(x01_product, vshift);
const int64x2_t x23_scaled = vrshlq_s64(x23_product, vshift);
const int64x2_t y01_scaled = vrshlq_s64(y01_product, vshift);
const int64x2_t y23_scaled = vrshlq_s64(y23_product, vshift);
const int64x2_t z01_scaled = vrshlq_s64(z01_product, vshift);
const int64x2_t z23_scaled = vrshlq_s64(z23_product, vshift);
const int64x2_t w01_scaled = vrshlq_s64(w01_product, vshift);
const int64x2_t w23_scaled = vrshlq_s64(w23_product, vshift);
#ifdef __aarch64__
const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const int8x16_t xyzw_packed = vqmovn_high_s16(vqmovn_s16(xy_packed), zw_packed);
#else
const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const int8x16_t xyzw_packed = vcombine_s8(vqmovn_s16(xy_packed), vqmovn_s16(zw_packed));
#endif
const int8x16_t xyzw_clamped = vmaxq_s8(vminq_s8(xyzw_packed, vqmax), vqmin);
// AArch32 version:
// 8x VMULL.S32 Qd, Dm, Dn
// 8x VRSHL.S64 Qd, Qm, Qn
// 8x VMOVN.S64 Dd, Qm
// 4x VQMOVN.S32 Dd, Qm
// 2x VQADD.S16 Qd, Qm, Qn
// 2x VQMOVN.S16 Dd, Qm
// 1x VMAX.S8 Qd, Qm, Qn
// 1x VMIN.S8 Qd, Qm, Qn
// ---------------------
// 34 instructions total
//
// AArch64 version:
// 4x SMULL Vd.2D, Vn.2S, Vm.2S
// 4x SMULL2 Vd.2D, Vn.4S, Vm.4S
// 8x SRSHL Vd.2D, Vn.2D, Vm.2D
// 4x UZP1 Vd.4S, Vn.4S, Vm.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTN Vd.8B, Vn.8H
// 1x SQXTN2 Vd.16B, Vn.8H
// 1x SMIN Vd.16B, Vn.16B, Vm.16B
// 1x SMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 30 instructions total
vst1q_s8(output, xyzw_clamped);
output += 16;
}
}
| 5,635 | 41.37594 | 114 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndnu-neon-qdmulh.c |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndnu__neon_qdmulh(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t) (((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
// Split shift into pre_shift + post_shift, post_shift in [1, 31] range.
const int32_t post_shift = math_max_s32(shift, 1);
const int32_t pre_shift = shift - post_shift;
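// SQDMULH returns the high half of the doubled product, i.e. (x * multiplier) >> 31
// with saturation, so the multiplier acts as a Q31 fixed-point factor. The rounding
// right shift by post_shift must shift by at least one bit; when shift == 0,
// pre_shift == -1 and the inputs are first shifted left by one bit to compensate.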
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
const int16x8_t vzero_point = vdupq_n_s16((int16_t) zero_point);
const int32x4_t vpre_shift = vdupq_n_s32(-pre_shift);
const int32x4_t vpost_shift = vdupq_n_s32(-post_shift);
const int8x16_t vqmin = vdupq_n_s8(qmin);
const int8x16_t vqmax = vdupq_n_s8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
const int32x4_t x_preshifted = vshlq_s32(x, vpre_shift);
const int32x4_t y_preshifted = vshlq_s32(y, vpre_shift);
const int32x4_t z_preshifted = vshlq_s32(z, vpre_shift);
const int32x4_t w_preshifted = vshlq_s32(w, vpre_shift);
const int32x4_t x_product = vqdmulhq_s32(x_preshifted, vmultiplier);
const int32x4_t y_product = vqdmulhq_s32(y_preshifted, vmultiplier);
const int32x4_t z_product = vqdmulhq_s32(z_preshifted, vmultiplier);
const int32x4_t w_product = vqdmulhq_s32(w_preshifted, vmultiplier);
const int32x4_t x_scaled = vrshlq_s32(x_product, vpost_shift);
const int32x4_t y_scaled = vrshlq_s32(y_product, vpost_shift);
const int32x4_t z_scaled = vrshlq_s32(z_product, vpost_shift);
const int32x4_t w_scaled = vrshlq_s32(w_product, vpost_shift);
#ifdef __aarch64__
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const int8x16_t xyzw_packed = vqmovn_high_s16(vqmovn_s16(xy_packed), zw_packed);
#else
const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const int8x16_t xyzw_packed = vcombine_s8(vqmovn_s16(xy_packed), vqmovn_s16(zw_packed));
#endif
const int8x16_t xyzw_clamped = vmaxq_s8(vminq_s8(xyzw_packed, vqmax), vqmin);
// AArch32 version:
// 4x VSHL.S32 Qd, Qm, Qn
// 4x VQDMULH.S32 Qd, Qm, Qn
// 4x VRSHL.S32 Qd, Qm, Qn
// 4x VQMOVN.S32 Dd, Qm
// 2x VQADD.S16 Qd, Qm, Qn
// 2x VQMOVN.S16 Dd, Qm
// 1x VMAX.S8 Qd, Qm, Qn
// 1x VMIN.S8 Qd, Qm, Qn
// ---------------------
// 22 instructions total
//
// AArch64 version:
// 4x SSHL Vd.4S, Vn.4S, Vm.4S
// 4x SQDMULH Vd.4S, Vn.4S, Vm.4S
// 4x SRSHL Vd.4S, Vn.4S, Vm.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTN Vd.8B, Vn.8H
// 1x SQXTN2 Vd.16B, Vn.8H
// 1x SMIN Vd.16B, Vn.16B, Vm.16B
// 1x SMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 22 instructions total
vst1q_s8(output, xyzw_clamped);
output += 16;
}
}
| 4,158 | 35.165217 | 114 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndnu-scalar.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndnu__scalar(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const int64_t rounding = INT64_C(1) << (shift - 1);
const int32_t smin = (int32_t) qmin - (int32_t) zero_point;
const int32_t smax = (int32_t) qmax - (int32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute full 64-bit product of signed 32-bit factors.
//
// Note: multiplier can be treated as either signed or unsigned.
const int64_t x_product = (int64_t) x * (int64_t) multiplier;
const int64_t y_product = (int64_t) y * (int64_t) multiplier;
const int64_t z_product = (int64_t) z * (int64_t) multiplier;
const int64_t w_product = (int64_t) w * (int64_t) multiplier;
// Arithmetically shift the full 64-bit product right with rounding.
// Rounding is performed towards the closest integer, with midpoints rounded up.
//
// Note that although the rounding addend is precomputed, it depends on the shift value, and on processors with a
// 64-bit "right shift with rounding" instruction each line below can be represented by just one such instruction
// (e.g. VRSHL.S64 on ARM NEON, SRSHL in ARM64 Advanced SIMD).
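// Illustrative example: for scale = 0x1.0p-8f (1/256), multiplier = 0x00800000 = 2^23,
// shift = 127 + 23 - 119 = 31, and rounding = 2^30. Then for x = 1000,
// (1000 * 2^23 + 2^30) >> 31 == 4, matching round(1000 / 256) = round(3.90625);
// for x = -1000 the arithmetic shift yields -4.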
const int32_t x_scaled = (int32_t) math_asr_s64(x_product + rounding, shift);
const int32_t y_scaled = (int32_t) math_asr_s64(y_product + rounding, shift);
const int32_t z_scaled = (int32_t) math_asr_s64(z_product + rounding, shift);
const int32_t w_scaled = (int32_t) math_asr_s64(w_product + rounding, shift);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition cannot be safely done before clamping, because scaled values are in the [-2147483520, 2147483519]
// range, so adding a zero point (up to 127 in magnitude) before clamping could overflow a signed 32-bit integer.
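// For example, with qmin = -128, qmax = 127 and zero_point = 10: smin = -138 and
// smax = 117, so a scaled value of 200 clamps to 117 and biases to 127 == qmax.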
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (int8_t) x_biased;
output[1] = (int8_t) y_biased;
output[2] = (int8_t) z_biased;
output[3] = (int8_t) w_biased;
output += 4;
}
}
| 3,531 | 39.136364 | 115 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndnu-sse41-sra.c |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndnu__sse41_sra(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) (scale_bits << 7) & INT32_C(0x3FFFFF80)) | INT32_C(0x40000000);
const uint32_t shift = 127 + 30 - (scale_bits >> 23);
assert(shift >= 31);
assert(shift < 63);
const int64_t rounding = INT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) (shift - 31));
const __m128i vrounding = _mm_set1_epi64x(rounding);
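// The arithmetic shift by shift is decomposed as 31 + (shift - 31): the rounded
// 64-bit products are first brought down by 31 bits (even lanes via PSRLQ by 31,
// odd lanes by shifting left one bit and blending in the high 32-bit halves),
// leaving signed values that fit a 32-bit lane, and the remaining shift - 31
// is applied with a 32-bit arithmetic shift (PSRAD).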
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_odd = _mm_shuffle_epi32(x, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_odd = _mm_shuffle_epi32(y, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_odd = _mm_shuffle_epi32(z, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_odd = _mm_shuffle_epi32(w, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product02 = _mm_mul_epi32(x, vmultiplier);
const __m128i y_product02 = _mm_mul_epi32(y, vmultiplier);
const __m128i z_product02 = _mm_mul_epi32(z, vmultiplier);
const __m128i w_product02 = _mm_mul_epi32(w, vmultiplier);
const __m128i x_product13 = _mm_mul_epi32(x_odd, vmultiplier);
const __m128i y_product13 = _mm_mul_epi32(y_odd, vmultiplier);
const __m128i z_product13 = _mm_mul_epi32(z_odd, vmultiplier);
const __m128i w_product13 = _mm_mul_epi32(w_odd, vmultiplier);
const __m128i x_prescaled02 = _mm_srli_epi64(_mm_add_epi64(x_product02, vrounding), 31);
const __m128i x_prescaled13 = _mm_slli_epi64(_mm_add_epi64(x_product13, vrounding), 1);
const __m128i y_prescaled02 = _mm_srli_epi64(_mm_add_epi64(y_product02, vrounding), 31);
const __m128i y_prescaled13 = _mm_slli_epi64(_mm_add_epi64(y_product13, vrounding), 1);
const __m128i z_prescaled02 = _mm_srli_epi64(_mm_add_epi64(z_product02, vrounding), 31);
const __m128i z_prescaled13 = _mm_slli_epi64(_mm_add_epi64(z_product13, vrounding), 1);
const __m128i w_prescaled02 = _mm_srli_epi64(_mm_add_epi64(w_product02, vrounding), 31);
const __m128i w_prescaled13 = _mm_slli_epi64(_mm_add_epi64(w_product13, vrounding), 1);
const __m128i x_prescaled = _mm_blend_epi16(x_prescaled02, x_prescaled13, 0xCC);
const __m128i y_prescaled = _mm_blend_epi16(y_prescaled02, y_prescaled13, 0xCC);
const __m128i z_prescaled = _mm_blend_epi16(z_prescaled02, z_prescaled13, 0xCC);
const __m128i w_prescaled = _mm_blend_epi16(w_prescaled02, w_prescaled13, 0xCC);
const __m128i x_scaled = _mm_sra_epi32(x_prescaled, vshift);
const __m128i y_scaled = _mm_sra_epi32(y_prescaled, vshift);
const __m128i z_scaled = _mm_sra_epi32(z_prescaled, vshift);
const __m128i w_scaled = _mm_sra_epi32(w_prescaled, vshift);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packs_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epi8(_mm_min_epi8(xyzw_packed, vqmax), vqmin);
// 4x PSHUFD
// 8x PMULDQ
// 8x PADDQ
// 4x PSRLQ
// 4x PSLLQ
// 4x PBLENDW
// 4x PSRAD
// 2x PACKSSDW
// 2x PADDSW
// 1x PACKSSWB
// 1x PMAXSB
// 1x PMINSB
// ---------------------
// 43 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 4,408 | 40.205607 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-requantization/qs8-requantization-rndnu-sse41-srl.c |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qs8_requantize_rndnu__sse41_srl(
size_t n,
const int32_t* input,
float scale,
int8_t zero_point,
int8_t qmin,
int8_t qmax,
int8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) (scale_bits << 7) & INT32_C(0x3FFFFF80)) | INT32_C(0x40000000);
const uint32_t shift = 127 + 30 - (scale_bits >> 23);
assert(shift >= 31);
assert(shift < 63);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const uint64_t pre_shift_offset = UINT64_C(0x8000000000000000) | rounding;
const uint32_t post_shift_offset = (uint32_t) (UINT64_C(0x8000000000000000) >> shift);
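// Only logical shifts are available on 64-bit lanes, so the arithmetic shift is
// emulated with a sign-bias trick: adding 2^63 (folded into pre_shift_offset
// together with the rounding term) maps signed products onto unsigned order, and
// after the logical right shift the residual bias 2^63 >> shift is removed by
// subtracting post_shift_offset from each 32-bit result.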
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const __m128i vpre_offset = _mm_set1_epi64x((long long) pre_shift_offset);
const __m128i vpost_offset = _mm_set1_epi32((int) post_shift_offset);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_odd = _mm_shuffle_epi32(x, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_odd = _mm_shuffle_epi32(y, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_odd = _mm_shuffle_epi32(z, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_odd = _mm_shuffle_epi32(w, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product02 = _mm_mul_epi32(x, vmultiplier);
const __m128i y_product02 = _mm_mul_epi32(y, vmultiplier);
const __m128i z_product02 = _mm_mul_epi32(z, vmultiplier);
const __m128i w_product02 = _mm_mul_epi32(w, vmultiplier);
const __m128i x_product13 = _mm_mul_epi32(x_odd, vmultiplier);
const __m128i y_product13 = _mm_mul_epi32(y_odd, vmultiplier);
const __m128i z_product13 = _mm_mul_epi32(z_odd, vmultiplier);
const __m128i w_product13 = _mm_mul_epi32(w_odd, vmultiplier);
const __m128i x_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_product02, vpre_offset), vshift);
const __m128i x_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_product13, vpre_offset), vshift);
const __m128i y_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_product02, vpre_offset), vshift);
const __m128i y_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_product13, vpre_offset), vshift);
const __m128i z_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_product02, vpre_offset), vshift);
const __m128i z_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_product13, vpre_offset), vshift);
const __m128i w_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_product02, vpre_offset), vshift);
const __m128i w_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_product13, vpre_offset), vshift);
const __m128i x_scaled = _mm_sub_epi32(_mm_blend_epi16(x_scaled02, _mm_slli_epi64(x_scaled13, 32), 0xCC), vpost_offset);
const __m128i y_scaled = _mm_sub_epi32(_mm_blend_epi16(y_scaled02, _mm_slli_epi64(y_scaled13, 32), 0xCC), vpost_offset);
const __m128i z_scaled = _mm_sub_epi32(_mm_blend_epi16(z_scaled02, _mm_slli_epi64(z_scaled13, 32), 0xCC), vpost_offset);
const __m128i w_scaled = _mm_sub_epi32(_mm_blend_epi16(w_scaled02, _mm_slli_epi64(w_scaled13, 32), 0xCC), vpost_offset);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packs_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epi8(_mm_min_epi8(xyzw_packed, vqmax), vqmin);
// 4x PSHUFD
// 8x PMULDQ
// 8x PADDQ
// 8x PSRLQ
// 4x PSLLQ
// 4x PBLENDW
// 4x PSUBD
// 2x PACKSSDW
// 2x PADDSW
// 1x PACKSSWB
// 1x PMAXSB
// 1x PMINSB
// ---------------------
// 47 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 4,582 | 42.647619 | 124 | c |
| XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
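// The sequence above assembles the full signed 32-bit product input * multiplier
// from 16-bit multiplies: PMULHUW yields the unsigned high half of
// input * multiplier_lo, PMULLW(input, multiplier_hi) adds the upper multiplier
// half, and subtracting (input >> 15) & multiplier_lo corrects the unsigned high
// half for negative inputs; the unpacks below interleave the halves into 32-bit lanes.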
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,965 | 50.727273 | 122 | c |
| XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul16-ld64-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vbGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 16)));
input_a += 24;
input_b += 24;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 9,829 | 55.171429 | 122 | c |
| XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul16-ld64-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vbGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 16)));
const __m128i vaOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 24)));
const __m128i vbOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 24)));
input_a += 32;
input_b += 32;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
__m128i vbprodOPQRSTUVhi = _mm_mulhi_epu16(vbOPQRSTUV, vb_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vbprodOPQRSTUVlo = _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vbprodOPQRSTUVhi = _mm_add_epi16(vbprodOPQRSTUVhi, _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
vbprodOPQRSTUVhi = _mm_sub_epi16(vbprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vbOPQRSTUV, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_unpacklo_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_unpackhi_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
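Note: the mul16 kernels in this family (the x32 variant above and the x8 variant below) reconstruct a signed 16x16 -> 32-bit product from 16-bit primitives: an _mm_mulhi_epu16/_mm_mullo_epi16 pair on the low multiplier half, an _mm_mullo_epi16 with the high multiplier half folded into the upper word, and a sign correction ((a >> 15) & m_lo). A scalar model of one lane, for exposition only (the helper name is not part of XNNPACK):

#include <stdint.h>

// Sketch: one lane of the mul16 trick. m_lo/m_hi are the low/high 16-bit
// halves of the 32-bit per-input multiplier stored in the params struct.
static inline int32_t mul16_lane_model(int16_t a, uint16_t m_lo, uint16_t m_hi) {
  const uint32_t ua = (uint16_t) a;        // reinterpret lane as unsigned
  const uint32_t lo = ua * m_lo;           // _mm_mullo_epi16 + _mm_mulhi_epu16
  uint32_t hi = lo >> 16;
  hi += (uint16_t) (ua * m_hi);            // _mm_mullo_epi16 with m_hi
  if (a < 0) {
    hi -= m_lo;                            // the (a >> 15) & m_lo correction
  }
  // _mm_unpacklo_epi16/_mm_unpackhi_epi16 interleave lo/hi into 32-bit lanes;
  // the cast is a modular reinterpretation, as in the SIMD registers.
  return (int32_t) ((hi << 16) | (lo & 0xFFFF));
}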
file_length: 11391 | avg_line_length: 58.333333 | max_line_length: 122 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul16-ld64-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
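Note: ignoring the int16/int8 saturation at the pack steps, every vadd kernel in this set evaluates the same per-element recurrence. An approximate scalar reference (a sketch; the parameter names mirror the fields read from params above, and the helper is not part of XNNPACK):

#include <stdint.h>

// Approximate scalar model of one output element. _mm_sra_epi32 is a
// truncating arithmetic shift (rounds toward negative infinity); the NEON
// kernels later in this set use a rounding shift instead.
static inline int8_t qs8_vadd_model(int8_t a, int8_t b, int32_t bias,
                                    int32_t a_multiplier, int32_t b_multiplier,
                                    uint32_t shift, int16_t output_zero_point,
                                    int8_t output_min, int8_t output_max) {
  int32_t acc = bias + (int32_t) a * a_multiplier + (int32_t) b * b_multiplier;
  acc = acc >> shift;                    // arithmetic shift, like _mm_sra_epi32
  acc += output_zero_point;              // saturating _mm_adds_epi16 in the SIMD code
  if (acc < output_min) acc = output_min;  // _mm_max_epi8
  if (acc > output_max) acc = output_max;  // _mm_min_epi8
  return (int8_t) acc;
}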
file_length: 6090 | avg_line_length: 46.585938 | max_line_length: 122 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul32-ld32-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
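Note: the mul32 variants trade the 16-bit trick for a direct SSE4.1 _mm_mullo_epi32, at the cost of widening only 4 elements per load. Both remainder loops are also annotated XNN_OOB_READS: the tail still issues full 4/8-byte loads past the logical end of the inputs, so callers must keep those trailing bytes readable. A caller-side sketch (the PAD constant and helper are assumptions for illustration; internally XNNPACK reserves XNN_EXTRA_BYTES for this):

#include <stdint.h>
#include <stdlib.h>

// Sketch: over-allocate so the kernel's full-width tail loads stay in
// mapped memory. 16 bytes covers the widest load these kernels issue.
#define QS8_VADD_PAD 16
static int8_t* qs8_alloc_padded(size_t batch) {
  return (int8_t*) malloc(batch + QS8_VADD_PAD);
}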
file_length: 5947 | avg_line_length: 46.206349 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul32-ld32-x24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vbGHIJ, vb_multiplier));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vbKLMN, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
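A minimal call sketch for the x24 variant (assumptions: params has been initialized for the sse4_mul32 layout by the matching XNNPACK init routine, and the buffers carry the read padding discussed above; the wrapper name is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/vadd.h>

// Sketch only: n is in elements; sizeof(int8_t) == 1, so bytes == elements.
static void qs8_add(size_t n, const int8_t* a, const int8_t* b, int8_t* y,
                    const union xnn_qs8_add_minmax_params* params) {
  xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x24(n * sizeof(int8_t), a, b, y, params);
}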
file_length: 7201 | avg_line_length: 50.078014 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul32-ld32-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vbOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
const __m128i vbSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vbGHIJ, vb_multiplier));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vbKLMN, vb_multiplier));
vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_mullo_epi32(vbOPQR, vb_multiplier));
vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_mullo_epi32(vbSTUV, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 8153 | avg_line_length: 52.644737 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-avx-mul32-ld32-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
file_length: 4682 | avg_line_length: 43.179245 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld128-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(&params->neon.a_zero_point);
const int8x16_t vb_zero_point = vld1q_dup_s8(&params->neon.b_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(&params->neon.b_zero_point);
#endif
const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vb0123456789ABCDEF = vld1q_s8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vget_low_s8(vb_zero_point));
const int16x8_t vxb89ABCDEF = vsubl_high_s8(vb0123456789ABCDEF, vb_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vget_high_s8(vb0123456789ABCDEF), vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vget_low_s8(vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
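Note: on the NEON side the requantization shift uses vrshlq_s32 with a negative shift count, i.e. a rounding right shift, unlike the truncating _mm_sra_epi32 of the x86 kernels above. A scalar model (sketch; assumes 1 <= s <= 31 and no overflow when adding the rounding constant):

#include <stdint.h>

// Rounding arithmetic right shift, as performed by vrshlq_s32(x, -s).
static inline int32_t rounding_ashr(int32_t x, uint32_t s) {
  return (int32_t) ((x + ((int32_t) 1 << (s - 1))) >> s);
}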
file_length: 6216 | avg_line_length: 45.395522 | max_line_length: 126 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld128-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld128_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(&params->neon.a_zero_point);
const int8x16_t vb_zero_point = vld1q_dup_s8(&params->neon.b_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(&params->neon.b_zero_point);
#endif
const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vb0123456789ABCDEF = vld1q_s8(input_b); input_b += 16;
const int8x16_t vaGHIJKLMNOPQRSTUV = vld1q_s8(input_a); input_a += 16;
const int8x16_t vbGHIJKLMNOPQRSTUV = vld1q_s8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vget_low_s8(vb_zero_point));
const int16x8_t vxb89ABCDEF = vsubl_high_s8(vb0123456789ABCDEF, vb_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vget_low_s8(vaGHIJKLMNOPQRSTUV), vget_low_s8(va_zero_point));
const int16x8_t vxaOPQRSTUV = vsubl_high_s8(vaGHIJKLMNOPQRSTUV, va_zero_point);
const int16x8_t vxbGHIJKLMN = vsubl_s8(vget_low_s8(vbGHIJKLMNOPQRSTUV), vget_low_s8(vb_zero_point));
const int16x8_t vxbOPQRSTUV = vsubl_high_s8(vbGHIJKLMNOPQRSTUV, vb_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vget_high_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vget_low_s8(vaGHIJKLMNOPQRSTUV), va_zero_point);
const int16x8_t vxaOPQRSTUV = vsubl_s8(vget_high_s8(vaGHIJKLMNOPQRSTUV), va_zero_point);
const int16x8_t vxbGHIJKLMN = vsubl_s8(vget_low_s8(vbGHIJKLMNOPQRSTUV), vb_zero_point);
const int16x8_t vxbOPQRSTUV = vsubl_s8(vget_high_s8(vbGHIJKLMNOPQRSTUV), vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmulq_s32(vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmulq_s32(vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmulq_s32(vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmulq_s32(vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(vxbGHIJKLMN)), vb_multiplier);
vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(vxbGHIJKLMN)), vb_multiplier);
vaccOPQR = vmlaq_s32(vaccOPQR, vmovl_s16(vget_low_s16(vxbOPQRSTUV)), vb_multiplier);
vaccSTUV = vmlaq_s32(vaccSTUV, vmovl_s16(vget_high_s16(vxbOPQRSTUV)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vget_low_s8(vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 8604 | avg_line_length: 52.117284 | max_line_length: 126 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(&params->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
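Note: the ld64 variants load 8 lanes at a time and never split a q-register, so zero-point handling collapses to a single vsubl_s8 per 8 lanes, widening to 16 bits and subtracting the zero point in one instruction. Per lane that is simply (sketch; helper name hypothetical):

#include <stdint.h>

// One lane of vsubl_s8(v, zero_point): widen first, then subtract, so the
// difference cannot wrap in 8 bits.
static inline int16_t widen_sub_lane(int8_t v, int8_t zero_point) {
  return (int16_t) ((int16_t) v - (int16_t) zero_point);
}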
file_length: 5291 | avg_line_length: 43.470588 | max_line_length: 126 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-x24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(&params->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int8x8_t vaGHIJKLMN = vld1_s8(input_a); input_a += 8;
const int8x8_t vbGHIJKLMN = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
const int16x8_t vxbGHIJKLMN = vsubl_s8(vbGHIJKLMN, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmulq_s32(vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmulq_s32(vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(vxbGHIJKLMN)), vb_multiplier);
vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(vxbGHIJKLMN)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,384 | 46.649254 | 126 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(¶ms->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
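  // Same requantized-add scheme, unrolled to 32 elements per iteration.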
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int8x8_t vaGHIJKLMN = vld1_s8(input_a); input_a += 8;
const int8x8_t vbGHIJKLMN = vld1_s8(input_b); input_b += 8;
const int8x8_t vaOPQRSTUV = vld1_s8(input_a); input_a += 8;
const int8x8_t vbOPQRSTUV = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
const int16x8_t vxbGHIJKLMN = vsubl_s8(vbGHIJKLMN, vb_zero_point);
const int16x8_t vxaOPQRSTUV = vsubl_s8(vaOPQRSTUV, va_zero_point);
const int16x8_t vxbOPQRSTUV = vsubl_s8(vbOPQRSTUV, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmulq_s32(vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmulq_s32(vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmulq_s32(vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmulq_s32(vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(vxbGHIJKLMN)), vb_multiplier);
vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(vxbGHIJKLMN)), vb_multiplier);
vaccOPQR = vmlaq_s32(vaccOPQR, vmovl_s16(vget_low_s16(vxbOPQRSTUV)), vb_multiplier);
vaccSTUV = vmlaq_s32(vaccSTUV, vmovl_s16(vget_high_s16(vxbOPQRSTUV)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,305 | 49.386207 | 126 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(¶ms->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->neon.output_max);
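  // Process exactly 8 elements per iteration; the clamping bounds are kept in
  // 8-lane vectors since this variant never produces 16-lane stores.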
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
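  // Tail of 1-7 elements: a full 8-byte vector is still loaded (the kernel is
  // declared XNN_OOB_READS), but only the remaining bytes are stored.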
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int8x8_t vb01234567 = vld1_s8(input_b);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,054 | 38.754902 | 126 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__scalar_x1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
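  // Per-element requantized addition:
  //   acc = bias + a * a_multiplier + b * b_multiplier
  //   out = clamp(acc >> shift) + output_zero_point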
do {
const int32_t va = *input_a++;
const int32_t vb = *input_b++;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,603 | 31.08 | 88 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__scalar_x2(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
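  // 2-way unrolled variant of the scalar pipeline; loads and
  // multiply-accumulates are interleaved.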
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
input_a += 2;
const int32_t vb0 = input_b[0];
int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vb1 = input_b[1];
int32_t vacc1 = vbias + va1 * va_multiplier;
input_b += 2;
vacc0 += vb0 * vb_multiplier;
vacc1 += vb1 * vb_multiplier;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = *input_a;
const int32_t vb = *input_b;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
}
}
| 2,493 | 30.974359 | 88 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__scalar_x4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
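  // 4-way unrolled variant; a scalar tail loop afterwards handles the last
  // 1-3 elements.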
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
const int32_t va2 = input_a[2];
const int32_t va3 = input_a[3];
input_a += 4;
const int32_t vb0 = input_b[0];
int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vb1 = input_b[1];
int32_t vacc1 = vbias + va1 * va_multiplier;
const int32_t vb2 = input_b[2];
int32_t vacc2 = vbias + va2 * va_multiplier;
const int32_t vb3 = input_b[3];
int32_t vacc3 = vbias + va3 * va_multiplier;
input_b += 4;
vacc0 += vb0 * vb_multiplier;
vacc1 += vb1 * vb_multiplier;
vacc2 += vb2 * vb_multiplier;
vacc3 += vb3 * vb_multiplier;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
int32_t vout2 = math_asr_s32(vacc2, vshift);
int32_t vout3 = math_asr_s32(vacc3, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
vout2 += voutput_zero_point;
vout3 += voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = *input_a++;
const int32_t vb = *input_b++;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,364 | 32.65 | 88 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse2-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
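  // SSE2 has no 8-bit sign extension, so bytes are duplicated with unpacklo
  // and arithmetically shifted right by 8 to widen to int16. Each signed
  // 16x32-bit product is assembled from _mm_mullo_epi16/_mm_mulhi_epu16
  // halves, with a correction term subtracted for negative inputs.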
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
input_a += 16;
input_b += 16;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vb89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vb89ABCDEF, vb89ABCDEF), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 8,298 | 50.228395 | 122 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse2-mul16-ld64-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
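  // 24-element variant: two 8-element groups are packed into one 16-byte
  // store, and the third group is written with a 64-bit store.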
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
__m128i vaGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_a + 16));
__m128i vbGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_b + 16));
input_a += 24;
input_b += 24;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vb89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vb89ABCDEF, vb89ABCDEF), 8);
vaGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vaGHIJKLMN, vaGHIJKLMN), 8);
vbGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vbGHIJKLMN, vbGHIJKLMN), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
voutGHIJKLMN = _mm_min_epi16(voutGHIJKLMN, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 10,240 | 54.356757 | 122 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse2-mul16-ld64-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
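  // 32-element variant: four 8-element groups per iteration, emitted as two
  // 16-byte stores.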
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
__m128i vaGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_a + 16));
__m128i vbGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_b + 16));
__m128i vaOPQRSTUV = _mm_loadl_epi64((const __m128i*) (input_a + 24));
__m128i vbOPQRSTUV = _mm_loadl_epi64((const __m128i*) (input_b + 24));
input_a += 32;
input_b += 32;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vb89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vb89ABCDEF, vb89ABCDEF), 8);
vaGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vaGHIJKLMN, vaGHIJKLMN), 8);
vbGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vbGHIJKLMN, vbGHIJKLMN), 8);
vaOPQRSTUV = _mm_srai_epi16(_mm_unpacklo_epi8(vaOPQRSTUV, vaOPQRSTUV), 8);
vbOPQRSTUV = _mm_srai_epi16(_mm_unpacklo_epi8(vbOPQRSTUV, vbOPQRSTUV), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
__m128i vbprodOPQRSTUVhi = _mm_mulhi_epu16(vbOPQRSTUV, vb_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vbprodOPQRSTUVlo = _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vbprodOPQRSTUVhi = _mm_add_epi16(vbprodOPQRSTUVhi, _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
vbprodOPQRSTUVhi = _mm_sub_epi16(vbprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vbOPQRSTUV, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_unpacklo_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_unpackhi_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);
voutOPQRSTUV = _mm_max_epi16(voutOPQRSTUV, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
voutGHIJKLMN = _mm_min_epi16(voutGHIJKLMN, voutput_max);
voutOPQRSTUV = _mm_min_epi16(voutOPQRSTUV, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 12,032 | 57.412621 | 122 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse2-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
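  // Process exactly 8 elements per iteration; a tail of 1-7 elements is
  // handled once after the loop with partial stores.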
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 6,192 | 45.916667 | 122 | c |
XNNPACK | XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
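  // SSE4.1 provides _mm_cvtepi8_epi16 for sign extension (replacing the SSE2
  // unpack/shift idiom) and 8-bit min/max, so clamping happens after the
  // final int8 pack.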
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
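The mulhi/mullo sequence in this kernel exists because the per-input multiplier is a 32-bit value split into 16-bit halves (a_multiplier_lo/a_multiplier_hi), and the low half is unsigned, so _mm_mulhi_epu16 must treat the signed input as unsigned. That unsigned view overestimates the product's high half by exactly the low multiplier half whenever the input is negative, which is what the _mm_srai_epi16(va, 15) mask subtracts back out. A scalar model of one lane (function name illustrative):

#include <stdint.h>

static int32_t mul16_lane(int16_t a, uint16_t m_lo, uint16_t m_hi) {
  const uint16_t ua = (uint16_t) a;  // unsigned view: a + 0x10000 when a < 0
  const uint16_t lo = (uint16_t) ((uint32_t) ua * m_lo);     // _mm_mullo_epi16
  uint16_t hi = (uint16_t) (((uint32_t) ua * m_lo) >> 16);   // _mm_mulhi_epu16
  hi = (uint16_t) (hi + (uint16_t) ((uint32_t) ua * m_hi));  // + _mm_mullo_epi16(a, m_hi)
  if (a < 0) { hi = (uint16_t) (hi - m_lo); }                // - (_mm_srai_epi16(a, 15) & m_lo)
  return (int32_t) (((uint32_t) hi << 16) | lo);             // reassembled 32-bit product
}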
| 7967
| 50.74026
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul16-ld64-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vbGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 16)));
input_a += 24;
input_b += 24;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
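Note how the 32-bit accumulators come together in these kernels: the low and high 16-bit halves of each product live in separate vectors, and _mm_unpacklo_epi16(lo, hi) interleaves them so lanes 0-3 become whole 32-bit products, while _mm_unpackhi_epi16 yields lanes 4-7. A plain-C model of that pairing for one 8-lane group (names illustrative):

#include <stdint.h>

static void pair_products(const uint16_t lo[8], const uint16_t hi[8], int32_t out[8]) {
  for (int i = 0; i < 8; i++) {
    // Whether an element lands in the unpacklo or the unpackhi result,
    // its 32-bit lane is always (hi << 16) | lo of the same index.
    out[i] = (int32_t) (((uint32_t) hi[i] << 16) | (uint32_t) lo[i]);
  }
}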
| 9831
| 55.182857
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul16-ld64-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vbGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 16)));
const __m128i vaOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 24)));
const __m128i vbOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 24)));
input_a += 32;
input_b += 32;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
__m128i vbprodGHIJKLMNhi = _mm_mulhi_epu16(vbGHIJKLMN, vb_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vbprodGHIJKLMNlo = _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
__m128i vbprodOPQRSTUVhi = _mm_mulhi_epu16(vbOPQRSTUV, vb_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vbprodOPQRSTUVlo = _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vbprodGHIJKLMNhi = _mm_add_epi16(vbprodGHIJKLMNhi, _mm_mullo_epi16(vbGHIJKLMN, vb_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vbprodOPQRSTUVhi = _mm_add_epi16(vbprodOPQRSTUVhi, _mm_mullo_epi16(vbOPQRSTUV, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vbprodGHIJKLMNhi = _mm_sub_epi16(vbprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vbGHIJKLMN, 15), vb_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
vbprodOPQRSTUVhi = _mm_sub_epi16(vbprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vbOPQRSTUV, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vbprodGHIJKLMNlo, vbprodGHIJKLMNhi));
vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_unpacklo_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_unpackhi_epi16(vbprodOPQRSTUVlo, vbprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
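Stripped of the unrolling, every variant in this family computes the same per-element recipe: a bias prepared at init time plus two integer products, an arithmetic shift right, a saturating add of the output zero point, and a clamp. A scalar reference for one element (it folds the two saturating packs into the final clamp, which matches the vector code as long as acc + zero_point does not overflow int32):

#include <stdint.h>

static int8_t qs8_add_one(int8_t a, int8_t b,
                          int32_t bias, int32_t a_mult, int32_t b_mult,
                          uint32_t shift, int16_t zero_point,
                          int8_t out_min, int8_t out_max) {
  int32_t acc = bias + (int32_t) a * a_mult + (int32_t) b * b_mult;
  acc >>= shift;                   // _mm_sra_epi32; >> is arithmetic on two's-complement targets
  int32_t out = acc + zero_point;  // _mm_adds_epi16 does this with 16-bit saturation
  if (out < out_min) { out = out_min; }  // _mm_max_epi8
  if (out > out_max) { out = out_max; }  // _mm_min_epi8
  return (int8_t) out;
}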
| 11393
| 58.34375
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
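Unlike the wider variants, the x8 kernel's remainder is a single braced block rather than a do-while: the main loop leaves fewer than 8 elements, and the XNN_OOB_READS annotation permits one last full 8-byte load anyway; only the stores are exact. A caller-side sketch of what that contract implies (the 7-byte slack is an assumption for illustration, not a requirement stated in this file):

#include <stdint.h>
#include <stdlib.h>

// Allocate an input buffer with enough slack that a full 8-byte tail
// load starting at any valid element offset stays inside the allocation.
static int8_t* alloc_with_tail_slack(size_t batch) {
  return (int8_t*) malloc(batch + 7);
}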
| 6092
| 46.601563
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul32-ld32-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
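The mul32/ld32 family takes the opposite approach to mul16/ld64: it loads only 4 bytes per step, sign-extends straight to 32-bit lanes with _mm_cvtepi8_epi32, and multiplies with the SSE4.1 _mm_mullo_epi32, so the half-splitting and sign fixup of the mul16 path disappear at the cost of twice as many loads and multiplies per element. The per-lane arithmetic reduces to this plain-C model (names illustrative):

#include <stdint.h>

static void widen_mul_4lanes(const int8_t a[4], int32_t multiplier,
                             int32_t bias, int32_t acc[4]) {
  for (int i = 0; i < 4; i++) {
    acc[i] = bias + (int32_t) a[i] * multiplier;  // one full 32-bit product per lane
  }
}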
| 5949
| 46.222222
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul32-ld32-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vbGHIJ, vb_multiplier));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vbKLMN, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
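A side note on the signature shared by all of these kernels: params[restrict XNN_MIN_ELEMENTS(1)] uses C99 array-parameter qualifiers, with XNN_MIN_ELEMENTS presumably expanding to `static 1` so the compiler may assume a valid, unaliased pointer to at least one element. A standalone illustration of the same qualifiers (not XNNPACK code):

#include <stddef.h>

// `restrict static 1` promises a non-null, unaliased pointer to at
// least one element, letting the compiler drop NULL checks and hoist loads.
static int sum_values(const int values[restrict static 1], size_t count) {
  int total = 0;
  for (size_t i = 0; i < count; i++) {
    total += values[i];
  }
  return total;
}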
| 7203
| 50.092199
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul32-ld32-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vbOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
const __m128i vbSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vbGHIJ, vb_multiplier));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vbKLMN, vb_multiplier));
vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_mullo_epi32(vbOPQR, vb_multiplier));
vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_mullo_epi32(vbSTUV, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
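The narrowing chain at the end of each loop body deserves a gloss: _mm_packs_epi32 saturates 32-bit lanes to int16, _mm_adds_epi16 then adds the output zero point with 16-bit saturation, and _mm_packs_epi16 saturates down to int8 before the final min/max clamp. A scalar model of one lane:

#include <stdint.h>

static int16_t sat16(int32_t v) {
  return (int16_t) (v > INT16_MAX ? INT16_MAX : v < INT16_MIN ? INT16_MIN : v);
}

static int8_t sat8(int16_t v) {
  return (int8_t) (v > INT8_MAX ? INT8_MAX : v < INT8_MIN ? INT8_MIN : v);
}

static int8_t narrow_with_zero_point(int32_t acc, int16_t zero_point) {
  const int16_t with_zp = sat16((int32_t) sat16(acc) + zero_point);  // packs_epi32 + adds_epi16
  return sat8(with_zp);                                              // packs_epi16
}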
| 8155
| 52.657895
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-sse41-mul32-ld32-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
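unaligned_load_s32, used for every 4-byte group in the mul32 kernels, exists because dereferencing a misaligned int32_t pointer is undefined behavior in C; the usual implementation is a memcpy that the compiler folds into a single unaligned load. A sketch of that shape (illustrative, not the actual xnnpack/unaligned.h contents):

#include <stdint.h>
#include <string.h>

static int32_t load_s32_unaligned(const void* address) {
  int32_t value;
  memcpy(&value, address, sizeof(value));  // well-defined at any alignment
  return value;
}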
| 4684
| 43.198113
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__wasmsimd_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_i16x8_load8x8(input_b + 8);
input_a += 16;
input_b += 16;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb89ABCDEF), vb_multiplier));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb89ABCDEF), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
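  // Remainder: process up to 8 elements at a time; sub-8 tails are written
  // out as 4-, 2-, and 1-byte pieces via store-lane, shifting the consumed
  // bytes out of the vector after each partial store.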
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 5,252 | avg_line_length: 43.516949 | max_line_length: 119 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-wasmsimd-x24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__wasmsimd_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_i16x8_load8x8(input_b + 8);
const v128_t vaGHIJKLMN = wasm_i16x8_load8x8(input_a + 16);
const v128_t vbGHIJKLMN = wasm_i16x8_load8x8(input_b + 16);
input_a += 24;
input_b += 24;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaGHIJKLMN), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb89ABCDEF), vb_multiplier));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb89ABCDEF), vb_multiplier));
vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vbGHIJKLMN), vb_multiplier));
vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vbGHIJKLMN), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_max(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 6,360 | avg_line_length: 47.557252 | max_line_length: 119 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-wasmsimd-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__wasmsimd_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_i16x8_load8x8(input_b + 8);
const v128_t vaGHIJKLMN = wasm_i16x8_load8x8(input_a + 16);
const v128_t vbGHIJKLMN = wasm_i16x8_load8x8(input_b + 16);
const v128_t vaOPQRSTUV = wasm_i16x8_load8x8(input_a + 24);
const v128_t vbOPQRSTUV = wasm_i16x8_load8x8(input_b + 24);
input_a += 32;
input_b += 32;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccOPQR = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaOPQRSTUV), va_multiplier));
v128_t vaccSTUV = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaOPQRSTUV), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb89ABCDEF), vb_multiplier));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb89ABCDEF), vb_multiplier));
vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vbGHIJKLMN), vb_multiplier));
vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vbGHIJKLMN), vb_multiplier));
vaccOPQR = wasm_i32x4_add(vaccOPQR, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vbOPQRSTUV), vb_multiplier));
vaccSTUV = wasm_i32x4_add(vaccSTUV, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vbOPQRSTUV), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
vaccOPQR = wasm_i32x4_shr(vaccOPQR, vshift);
vaccSTUV = wasm_i32x4_shr(vaccSTUV, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t voutOPQRSTUV = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = wasm_i8x16_max(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_i8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 7,149 | avg_line_length: 50.071429 | max_line_length: 119 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-wasmsimd-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__wasmsimd_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
file_length: 4,152 | avg_line_length: 40.53 | max_line_length: 119 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-xop-mul32-ld32-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
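  // XOP variant: each group of four int8 values is sign-extended to 32 bits
  // directly from a 4-byte load, and _mm_macc_epi32 (XOP integer
  // multiply-accumulate) folds the multiply by the input multiplier and the
  // bias accumulation into a single instruction.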
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc89AB = _mm_macc_epi32(vb89AB, vb_multiplier, vacc89AB);
vaccCDEF = _mm_macc_epi32(vbCDEF, vb_multiplier, vaccCDEF);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 5,808 | avg_line_length: 43.684615 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-xop-mul32-ld32-x24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
__m128i vaccGHIJ = _mm_macc_epi32(vaGHIJ, va_multiplier, vbias);
__m128i vaccKLMN = _mm_macc_epi32(vaKLMN, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc89AB = _mm_macc_epi32(vb89AB, vb_multiplier, vacc89AB);
vaccCDEF = _mm_macc_epi32(vbCDEF, vb_multiplier, vaccCDEF);
vaccGHIJ = _mm_macc_epi32(vbGHIJ, vb_multiplier, vaccGHIJ);
vaccKLMN = _mm_macc_epi32(vbKLMN, vb_multiplier, vaccKLMN);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 6,998 | avg_line_length: 47.268966 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-xop-mul32-ld32-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vbOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
const __m128i vbSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
__m128i vaccGHIJ = _mm_macc_epi32(vaGHIJ, va_multiplier, vbias);
__m128i vaccKLMN = _mm_macc_epi32(vaKLMN, va_multiplier, vbias);
__m128i vaccOPQR = _mm_macc_epi32(vaOPQR, va_multiplier, vbias);
__m128i vaccSTUV = _mm_macc_epi32(vaSTUV, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc89AB = _mm_macc_epi32(vb89AB, vb_multiplier, vacc89AB);
vaccCDEF = _mm_macc_epi32(vbCDEF, vb_multiplier, vaccCDEF);
vaccGHIJ = _mm_macc_epi32(vbGHIJ, vb_multiplier, vaccGHIJ);
vaccKLMN = _mm_macc_epi32(vbKLMN, vb_multiplier, vaccKLMN);
vaccOPQR = _mm_macc_epi32(vbOPQR, vb_multiplier, vaccOPQR);
vaccSTUV = _mm_macc_epi32(vbSTUV, vb_multiplier, vaccSTUV);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 7,886 | avg_line_length: 49.557692 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vadd/gen/qs8-vadd-minmax-xop-mul32-ld32-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul32.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
file_length: 4,607 | avg_line_length: 40.890909 | max_line_length: 107 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul16-ld64-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
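  // input_b is a single scalar in the vaddc kernels, so its entire
  // contribution b_multiplier * (*input_b) is folded into the bias once,
  // up front, and broadcast across all four 32-bit lanes.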
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
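  // mul16 variant: 32-bit products are assembled from 16-bit halves.
  // _mm_mulhi_epu16 yields the unsigned high half of a * multiplier_lo; it is
  // corrected for negative 'a' by subtracting multiplier_lo wherever the sign
  // bit is set, combined with the low half of a * multiplier_hi, and then
  // interleaved with the low product halves into 32-bit accumulators.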
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 5,826 | avg_line_length: 44.170543 | max_line_length: 122 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul16-ld64-x24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
input_a += 24;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
file_length: 7,012 | avg_line_length: 48.041958 | max_line_length: 122 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul16-ld64-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vaOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 24)));
input_a += 32;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
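    // Emulate a signed 16x32-bit multiply: add the low product of the
    // multiplier's high half into the unsigned high product, then subtract
    // a_multiplier_lo from lanes where the input is negative (the sign mask
    // from _mm_srai_epi16) to turn the unsigned high half into the signed one.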
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,896
| 50.614379
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
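    // Narrow with saturation: pack 32->16 bits, add the output zero point with
    // saturating adds, then pack 16->8 bits before clamping to [min, max].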
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
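      // A single partial group of up to 7 elements remains; the 8-byte load may
      // read past the end of the input, which the kernel's XNN_OOB_READS
      // annotation permits.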
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,649
| 40.891892
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul32-ld32-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
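  // The mul32 variant sign-extends each input byte to 32 bits and multiplies
  // by the full 32-bit a_multiplier with _mm_mullo_epi32 (SSE4.1), so no
  // 16-bit splitting of the multiplier is needed.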
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 4,887
| 42.642857
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul32-ld32-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
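    // 24 results per iteration: one full 16-byte store, then a 64-bit store of
    // the low half of the self-packed GHIJKLMN vector.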
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,771
| 45.926829
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul32-ld32-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
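  // The x32 variant unrolls the main loop to eight independent 4-lane
  // accumulators per iteration.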
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,353
| 47.876923
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-avx-mul32-ld32-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
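    // The constant operand was already folded into vbias, so the input_b bump
    // above is dead code, presumably shared with the two-input vadd template.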
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,011
| 40.360825
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(¶ms->neon.a_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
#endif
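  // On AArch64 the zero point is kept in a full 128-bit register so that
  // vsubl_high_s8 can widen the upper input half directly; 32-bit NEON keeps a
  // 64-bit copy and widens each half with vget_low/high_s8 + vsubl_s8.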
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
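    // vrshlq_s32 shifts left by a signed per-lane count; with the negative
    // right_shift this is a rounding arithmetic shift right, which completes
    // the fixed-point requantization.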
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
#endif
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 4,959
| 40.680672
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld128-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld128_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(¶ms->neon.a_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
#endif
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vaGHIJKLMNOPQRSTUV = vld1q_s8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vget_low_s8(vaGHIJKLMNOPQRSTUV), vget_low_s8(va_zero_point));
const int16x8_t vxaOPQRSTUV = vsubl_high_s8(vaGHIJKLMNOPQRSTUV, va_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vget_low_s8(vaGHIJKLMNOPQRSTUV), va_zero_point);
const int16x8_t vxaOPQRSTUV = vsubl_s8(vget_high_s8(vaGHIJKLMNOPQRSTUV), va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
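    // Two-stage saturating narrow: int32 -> int16 above (vqmovn_s32 plus a
    // saturating zero-point add), int16 -> int8 here (vqmovn_s16).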
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
#endif
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,560
| 46.543478
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
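  // The ld64 variants load 8 input bytes at a time; vsubl_s8 widens to 16 bits
  // and subtracts the zero point in a single instruction.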
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 4,410
| 40.224299
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld64-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vaGHIJKLMN = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
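    // Each 16-bit half is widened (vmovl_s16) and multiply-accumulated into
    // the shared bias (vmlaq_s32).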
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,203
| 43.101695
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld64-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vaGHIJKLMN = vld1_s8(input_a); input_a += 8;
const int8x8_t vaOPQRSTUV = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
const int16x8_t vxaOPQRSTUV = vsubl_s8(vaOPQRSTUV, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
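        // After each partial store, vext_s8 rotates the vector so the next
        // unwritten bytes land in lane 0.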
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,824
| 45.6
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(¶ms->neon.a_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->neon.output_max);
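  // The x8 variant only ever clamps 8 lanes at a time, so 64-bit copies of the
  // output bounds suffice (the wider variants load them as int8x16_t).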
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
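Element-wise, the kernel above is a widen-multiply-accumulate followed by a rounding shift and clamping. A hedged scalar reference for one element, with bias precomputed as (b - b_zero_point) * b_multiplier exactly as in the vbias setup; the two saturating narrows (vqmovn_s32, vqmovn_s16) plus the final min/max collapse into a single clamp here because output_min and output_max lie inside the int8 range and every step is monotone:

#include <stdint.h>

// Hypothetical scalar reference for one element of the NEON kernel above;
// assumes 1 <= shift <= 31 and models vrshlq_s32 as a rounding right shift.
static int8_t qs8_vaddc_ref(int8_t a, int8_t a_zero_point, int32_t a_multiplier,
                            int32_t bias, uint32_t shift,
                            int16_t output_zero_point,
                            int8_t output_min, int8_t output_max) {
  int64_t acc = (int64_t) bias
      + (int64_t) ((int32_t) a - (int32_t) a_zero_point) * a_multiplier;
  acc = (acc + (INT64_C(1) << (shift - 1))) >> shift;  // rounding right shift
  acc += output_zero_point;                            // re-quantize to output
  if (acc < output_min) acc = output_min;              // clamp to [min, max]
  if (acc > output_max) acc = output_max;
  return (int8_t) acc;
}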
| 3,487
| 36.106383
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__scalar_x1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
do {
const int32_t va = *input_a++;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
batch -= sizeof(int8_t);
} while (batch != 0);
}
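The scalar variant clamps before adding the output zero point, against bounds that already have the zero point subtracted; that ordering saves a second clamp after the add. A sketch of how such folded constants could be derived from user-facing values (this is not the library's init routine, and the struct shape merely mirrors the fields read above):

#include <stdint.h>

// Hypothetical: fold the output zero point into the clamp bounds so the
// kernel can clamp once in the shifted domain, then add the zero point.
typedef struct {
  int32_t output_min_less_zero_point;
  int32_t output_max_less_zero_point;
  int32_t output_zero_point;
} scalar_clamp_params;

static scalar_clamp_params fold_clamp(int8_t output_min, int8_t output_max,
                                      int8_t output_zero_point) {
  scalar_clamp_params p;
  p.output_min_less_zero_point = (int32_t) output_min - (int32_t) output_zero_point;
  p.output_max_less_zero_point = (int32_t) output_max - (int32_t) output_zero_point;
  p.output_zero_point = (int32_t) output_zero_point;
  return p;
}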
| 1,539
| 31.083333
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__scalar_x2(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
input_a += 2;
const int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vacc1 = vbias + va1 * va_multiplier;
    input_b += 2;  // dead code: *input_b was folded into vbias above and the pointer is not read again
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = *input_a;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
}
}
| 2,302
| 31.43662
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__scalar_x4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
const int32_t va2 = input_a[2];
const int32_t va3 = input_a[3];
input_a += 4;
const int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vacc1 = vbias + va1 * va_multiplier;
const int32_t vacc2 = vbias + va2 * va_multiplier;
const int32_t vacc3 = vbias + va3 * va_multiplier;
    input_b += 4;  // dead code: *input_b was folded into vbias above and the pointer is not read again
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
int32_t vout2 = math_asr_s32(vacc2, vshift);
int32_t vout3 = math_asr_s32(vacc3, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
vout2 += voutput_zero_point;
vout3 += voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = *input_a++;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (int8_t) (vout + voutput_zero_point);
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,041
| 33.179775
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse2-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
input_a += 16;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
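The mul16 kernels split the 32-bit a_multiplier into 16-bit halves (a_multiplier_lo, a_multiplier_hi) and rebuild the low 32 bits of each signed product from _mm_mullo_epi16/_mm_mulhi_epu16, since SSE2 has no 16x16 -> 32 signed widening multiply against a 32-bit constant. The _mm_srai_epi16(va, 15) mask corrects the unsigned high half for negative inputs. A one-lane scalar model with a self-check (names hypothetical):

#include <assert.h>
#include <stdint.h>

// Low 32 bits of (int16) a * (uint32) m, with m split as mhi * 65536 + mlo,
// mirroring the lane-wise SSE2 sequence above.
static uint32_t mul16_model(int16_t a, uint32_t m) {
  const uint16_t mlo = (uint16_t) m;
  const uint16_t mhi = (uint16_t) (m >> 16);
  const uint16_t lo = (uint16_t) ((uint32_t) (uint16_t) a * mlo);    // _mm_mullo_epi16
  uint16_t hi = (uint16_t) (((uint32_t) (uint16_t) a * mlo) >> 16);  // _mm_mulhi_epu16
  hi = (uint16_t) (hi + (uint16_t) ((uint32_t) (uint16_t) a * mhi)); // + a * mhi
  if (a < 0) hi = (uint16_t) (hi - mlo);  // sign correction (the srai/and/sub)
  return (uint32_t) lo | ((uint32_t) hi << 16);  // _mm_unpacklo/hi_epi16
}

int main(void) {
  const int16_t a[] = {-32768, -1, 0, 1, 12345, 32767};
  const uint32_t m = UINT32_C(0x00012345);  // arbitrary test multiplier
  for (int i = 0; i < 6; i++) {
    assert(mul16_model(a[i], m) == (uint32_t) ((int64_t) a[i] * m));
  }
  return 0;
}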
| 6,000
| 43.783582
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse2-mul16-ld64-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vaGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_a + 16));
input_a += 24;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vaGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vaGHIJKLMN, vaGHIJKLMN), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
voutGHIJKLMN = _mm_min_epi16(voutGHIJKLMN, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
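SSE2 has no packed int8 -> int16 sign extension, so these kernels duplicate each byte with _mm_unpacklo_epi8(v, v) and arithmetic-shift the 16-bit lanes right by 8. One lane of that idiom in scalar C (a sketch; it assumes the usual two's-complement narrowing and arithmetic >> on signed values, as provided by mainstream compilers):

#include <assert.h>
#include <stdint.h>

// Duplicating byte b yields the 16-bit pattern (b << 8) | b; shifting that
// right by 8 arithmetically reproduces sign extension of b as an int8.
static int16_t widen_model(uint8_t b) {
  const int16_t dup = (int16_t) (uint16_t) ((b << 8) | b);  // _mm_unpacklo_epi8(v, v)
  return (int16_t) (dup >> 8);                              // _mm_srai_epi16(dup, 8)
}

int main(void) {
  for (int i = 0; i < 256; i++) {
    assert(widen_model((uint8_t) i) == (int16_t) (int8_t) (uint8_t) i);
  }
  return 0;
}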
| 7,210
| 47.395973
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse2-mul16-ld64-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vaGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input_a + 16));
__m128i vaOPQRSTUV = _mm_loadl_epi64((const __m128i*) (input_a + 24));
input_a += 32;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vaGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vaGHIJKLMN, vaGHIJKLMN), 8);
vaOPQRSTUV = _mm_srai_epi16(_mm_unpacklo_epi8(vaOPQRSTUV, vaOPQRSTUV), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);
voutOPQRSTUV = _mm_max_epi16(voutOPQRSTUV, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
voutGHIJKLMN = _mm_min_epi16(voutGHIJKLMN, voutput_max);
voutOPQRSTUV = _mm_min_epi16(voutOPQRSTUV, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 8,270
| 50.055556
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse2-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
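The remainder paths above store the last 1-7 bytes with a 4/2/1 cascade, shifting consumed bytes out of the vector between steps. A scalar equivalent, assuming a little-endian byte order that matches the _mm_srli_epi64/_mm_srli_epi32 shifts (sketch only):

#include <stdint.h>
#include <string.h>

// Write n (< 8) bytes held in the low lanes of a 64-bit value,
// least-significant byte first.
static void tail_store(int8_t* output, uint64_t lane, size_t n) {
  if (n & 4) { memcpy(output, &lane, 4); lane >>= 32; output += 4; }
  if (n & 2) { memcpy(output, &lane, 2); lane >>= 16; output += 2; }
  if (n & 1) { *output = (int8_t) lane; }
}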
| 4,647
| 40.132743
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
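Two differences from the SSE2 variants stand out in this SSE4.1 kernel: _mm_cvtepi8_epi16 replaces the unpack-and-shift widening idiom, and because SSE4.1 adds signed 8-bit min/max, the clamp moves after the final pack instead of running per 16-bit half. A side-by-side sketch of the two clamp placements (inputs and bounds are hypothetical; each pair of bounds broadcasts the same limits at different widths):

#include <smmintrin.h>

// SSE2 strategy: clamp both 16-bit halves, then saturating-pack to int8.
static __m128i clamp_then_pack_sse2(__m128i vout_lo, __m128i vout_hi,
                                    __m128i vmin16, __m128i vmax16) {
  vout_lo = _mm_min_epi16(_mm_max_epi16(vout_lo, vmin16), vmax16);
  vout_hi = _mm_min_epi16(_mm_max_epi16(vout_hi, vmin16), vmax16);
  return _mm_packs_epi16(vout_lo, vout_hi);
}

// SSE4.1 strategy: saturating-pack first, then one 8-bit clamp.
static __m128i pack_then_clamp_sse41(__m128i vout_lo, __m128i vout_hi,
                                     __m128i vmin8, __m128i vmax8) {
  const __m128i packed = _mm_packs_epi16(vout_lo, vout_hi);
  return _mm_min_epi8(_mm_max_epi8(packed, vmin8), vmax8);
}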
| 5,828
| 44.186047
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul16-ld64-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
input_a += 24;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,014
| 48.055944
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul16-ld64-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
const __m128i vaOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 24)));
input_a += 32;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
__m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
__m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,898
| 50.627451
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse4_mul16.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
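// --- Editor's note (illustrative sketch, not part of the generated kernel) ---
// The mul16 path above rebuilds the low 32 bits of a 32-bit product from
// 16-bit halves: _mm_mullo_epi16 gives the low half, _mm_mulhi_epu16 the
// unsigned high half, which is then patched with the multiplier's own high
// half and a sign correction for negative inputs. A scalar model of the same
// arithmetic, assuming the multiplier is split exactly as
// params->sse4_mul16.a_multiplier_lo/_hi are:
#include <stdint.h>
static int32_t mul16_model(int16_t a, uint16_t mul_lo, uint16_t mul_hi) {
  const uint32_t ua = (uint16_t) a;                                 // zero-extended lane
  const uint16_t prod_lo = (uint16_t) (ua * mul_lo);                // _mm_mullo_epi16
  uint16_t prod_hi = (uint16_t) ((ua * (uint32_t) mul_lo) >> 16);   // _mm_mulhi_epu16
  prod_hi = (uint16_t) (prod_hi + (uint16_t) (ua * mul_hi));        // high-half term
  if (a < 0) {
    prod_hi = (uint16_t) (prod_hi - mul_lo);  // _mm_srai_epi16 / _mm_and_si128 / _mm_sub_epi16
  }
  return (int32_t) (((uint32_t) prod_hi << 16) | prod_lo);          // unpacklo/unpackhi pairing
}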
| 4,651 | 40.90991 | 122 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
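// --- Editor's note (illustrative sketch) ---
// Every qs8 vaddc variant in this family computes the same per-element
// recurrence; a scalar reference follows, with bias already folding in the
// constant operand (bias = b_multiplier * (*input_b) + the params bias, as
// set up above). The intermediate int16 saturation performed by
// _mm_packs_epi32/_mm_adds_epi16 is omitted here for clarity, and the shift
// assumes arithmetic >> on negative values, matching _mm_sra_epi32.
#include <stdint.h>
static int8_t vaddc_ref(int8_t a, int32_t a_multiplier, int32_t bias,
                        uint32_t shift, int16_t output_zero_point,
                        int8_t output_min, int8_t output_max) {
  int32_t acc = bias + (int32_t) a * a_multiplier;  // _mm_mullo_epi32 + _mm_add_epi32
  acc = acc >> shift;                               // _mm_sra_epi32
  int32_t out = acc + output_zero_point;            // zero-point addition
  if (out < output_min) out = output_min;           // _mm_max_epi8
  if (out > output_max) out = output_max;           // _mm_min_epi8
  return (int8_t) out;
}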
| 4,889 | 42.660714 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul32-ld32-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,773 | 45.943089 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul32-ld32-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
__m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
__m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
__m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
__m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,355 | 47.892308 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-sse41-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
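// --- Editor's note (illustrative sketch) ---
// All SSE tails in this family peel the final sub-8-byte remainder the same
// way: store 4, then 2, then 1 byte(s) while shifting the vector right. A
// standalone model of that pattern over a plain 64-bit lane:
#include <stdint.h>
#include <string.h>
static void store_tail(int8_t* output, uint64_t lane, size_t n /* n < 8 */) {
  if (n & 4) {  // unaligned_store_u32 + _mm_srli_epi64(v, 32)
    const uint32_t lo = (uint32_t) lane;
    memcpy(output, &lo, 4); lane >>= 32; output += 4;
  }
  if (n & 2) {  // unaligned_store_u16 + _mm_srli_epi32(v, 16)
    const uint16_t lo = (uint16_t) lane;
    memcpy(output, &lo, 2); lane >>= 16; output += 2;
  }
  if (n & 1) {  // _mm_extract_epi8(v, 0)
    *output = (int8_t) lane;
  }
}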
| 4,013 | 40.381443 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
input_a += 16;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
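// --- Editor's note (illustrative sketch) ---
// The wasmsimd kernels compress 32-bit accumulators back to int8 through a
// chain of saturating narrows: i16x8.narrow_i32x4, a saturating zero-point
// add, then i8x16.narrow_i16x8 (the SSE kernels do the same via
// _mm_packs_epi32/_mm_adds_epi16/_mm_packs_epi16). Scalar model for one lane:
#include <stdint.h>
static int8_t narrow_chain(int32_t acc, int16_t zero_point) {
  if (acc > INT16_MAX) acc = INT16_MAX;   // wasm_i16x8_narrow_i32x4
  if (acc < INT16_MIN) acc = INT16_MIN;
  int32_t sum = acc + zero_point;         // wasm_i16x8_add_sat
  if (sum > INT16_MAX) sum = INT16_MAX;
  if (sum < INT16_MIN) sum = INT16_MIN;
  if (sum > INT8_MAX) sum = INT8_MAX;     // wasm_i8x16_narrow_i16x8
  if (sum < INT8_MIN) sum = INT8_MIN;
  return (int8_t) sum;
}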
| 4,360 | 40.141509 | 119 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-wasmsimd-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vaGHIJKLMN = wasm_i16x8_load8x8(input_a + 16);
input_a += 24;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaGHIJKLMN), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_max(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,177 | 43.637931 | 119 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-wasmsimd-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vaGHIJKLMN = wasm_i16x8_load8x8(input_a + 16);
const v128_t vaOPQRSTUV = wasm_i16x8_load8x8(input_a + 24);
input_a += 32;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccOPQR = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vaOPQRSTUV), va_multiplier));
v128_t vaccSTUV = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vaOPQRSTUV), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
vaccOPQR = wasm_i32x4_shr(vaccOPQR, vshift);
vaccSTUV = wasm_i32x4_shr(vaccSTUV, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t voutOPQRSTUV = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_i8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = wasm_i8x16_max(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_i8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,675 | 45.52459 | 119 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 3,571 | 37.826087 | 119 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-xop-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
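// --- Editor's note (illustrative sketch) ---
// The XOP variants differ from the SSE4.1 mul32 kernels only in using the
// fused multiply-accumulate _mm_macc_epi32(a, b, c), which keeps the low
// 32 bits of a*b and adds c per lane. That is bit-identical to the
// two-instruction sequence _mm_add_epi32(_mm_mullo_epi32(a, b), c) used
// earlier, since both keep only the low 32 bits of the product:
#include <stdint.h>
static int32_t macc_model(int32_t a, int32_t b, int32_t c) {
  return (int32_t) ((uint32_t) a * (uint32_t) b + (uint32_t) c);  // wraps mod 2^32
}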
| 4,844 | 40.767241 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-xop-mul32-ld32-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x24(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
input_a += 24;
input_b += 24;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
__m128i vaccGHIJ = _mm_macc_epi32(vaGHIJ, va_multiplier, vbias);
__m128i vaccKLMN = _mm_macc_epi32(vaKLMN, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,696 | 43.858268 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-xop-mul32-ld32-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x32(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
input_a += 32;
input_b += 32;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
__m128i vaccGHIJ = _mm_macc_epi32(vaGHIJ, va_multiplier, vbias);
__m128i vaccKLMN = _mm_macc_epi32(vaKLMN, va_multiplier, vbias);
__m128i vaccOPQR = _mm_macc_epi32(vaOPQR, va_multiplier, vbias);
__m128i vaccSTUV = _mm_macc_epi32(vaSTUV, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,246 | 45.619403 | 107 | c |
XNNPACK | XNNPACK-master/src/qs8-vaddc/gen/qs8-vaddc-minmax-xop-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4_mul32.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4_mul32.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul32.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul32.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul32.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4_mul32.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4_mul32.bias));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-armsimd32-x4.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__armsimd32_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x2_t vminus_input_zero_point = (int16x2_t) params->armsimd32.minus_input_zero_point;
const int32_t vbias = params->armsimd32.bias;
const int32_t vmultiplier = params->armsimd32.multiplier;
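  // Each iteration converts 4 elements: __sxtab16 sign-extends the even/odd bytes and
  // adds the negated input zero point; __smlawb/__smlawt then compute
  // ((multiplier * (x - zero_point)) >> 16) + bias on the bottom/top halfwords, and
  // __ssat saturates the shifted result to the int8 range.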
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
input += 4;
const int16x2_t vx02 = __sxtab16(vminus_input_zero_point, vx0123);
const int16x2_t vx13 = __sxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 1), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 1), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output += 4;
}
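  // Remainder of 1-3 elements: convert a (possibly over-read) word and store piecewise.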
if XNN_UNLIKELY(batch != 0) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
const int16x2_t vx02 = __sxtab16(vminus_input_zero_point, vx0123);
const int16x2_t vx13 = __sxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
const int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
if (batch & (2 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
vacc0 = __ssat(math_asr_s32(vacc2, 1), 8);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-armsimd32-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__armsimd32_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x2_t vminus_input_zero_point = (int16x2_t) params->armsimd32.minus_input_zero_point;
const int32_t vbias = params->armsimd32.bias;
const int32_t vmultiplier = params->armsimd32.multiplier;
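  // Main loop: convert 8 elements per iteration as two 4-element words, splitting each
  // word into even/odd halfword pairs with __sxtab16 before the fixed-point multiply.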
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_indexed_load_u32(input, 0);
const int8x4_t vx4567 = (int8x4_t) unaligned_indexed_load_u32(input, 1);
input += 8;
const int16x2_t vx02 = __sxtab16(vminus_input_zero_point, vx0123);
const int16x2_t vx13 = __sxtab16(vminus_input_zero_point, __ror(vx0123, 8));
const int16x2_t vx46 = __sxtab16(vminus_input_zero_point, vx4567);
const int16x2_t vx57 = __sxtab16(vminus_input_zero_point, __ror(vx4567, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
int32_t vacc4 = __smlawb(vmultiplier, vx46, vbias);
int32_t vacc5 = __smlawb(vmultiplier, vx57, vbias);
int32_t vacc6 = __smlawt(vmultiplier, vx46, vbias);
int32_t vacc7 = __smlawt(vmultiplier, vx57, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 1), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 1), 8);
vacc4 = __ssat(math_asr_s32(vacc4, 1), 8);
vacc5 = __ssat(math_asr_s32(vacc5, 1), 8);
vacc6 = __ssat(math_asr_s32(vacc6, 1), 8);
vacc7 = __ssat(math_asr_s32(vacc7, 1), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output[4] = (int8_t) vacc4;
output[5] = (int8_t) vacc5;
output[6] = (int8_t) vacc6;
output[7] = (int8_t) vacc7;
output += 8;
}
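  // Finish remaining groups of 4 elements.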
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
input += 4;
const int16x2_t vx02 = __sxtab16(vminus_input_zero_point, vx0123);
const int16x2_t vx13 = __sxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 1), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 1), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output += 4;
}
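  // Remainder of 1-3 elements.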
if XNN_UNLIKELY(batch != 0) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
const int16x2_t vx02 = __sxtab16(vminus_input_zero_point, vx0123);
const int16x2_t vx13 = __sxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
const int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
if (batch & (2 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
vacc0 = __ssat(math_asr_s32(vacc2, 1), 8);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
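  // Fixed-point rescale: compute (input_zero_point - x) in int16, shift left by 7, and
  // apply the Q15 multiplier with _mm_mulhrs_epi16 (the sign flip is compensated in the
  // precomputed multiplier), then re-add the output zero point with saturation.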
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
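  // Main loop: convert 32 elements per iteration as four 8-element groups, using the
  // same widen / shift / _mm_mulhrs_epi16 / saturating-add scheme as the x16 variant.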
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
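  // Convert 8 elements per iteration: widen to int16, rescale with the Q15 multiplier,
  // re-add the output zero point, and pack back to int8 with saturation.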
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx2-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx2_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
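  // Convert 16 elements per iteration using 256-bit arithmetic; the result is packed
  // back to 16 int8 values through the two 128-bit halves of the accumulator.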
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx2-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx2_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
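  // Main loop: convert 32 elements per iteration as two 16-element groups.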
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m256i vacc0 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
input += 32;
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
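    // _mm256_packs_epi16 packs within each 128-bit lane, so permute the 64-bit quarters
    // to restore the original element order before storing.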
__m256i vy0 = _mm256_packs_epi16(vacc0, vacc1);
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-avx2-x64.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__avx2_x64(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
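  // Main loop: convert 64 elements per iteration as four 16-element groups.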
for (; batch >= 64 * sizeof(int8_t); batch -= 64 * sizeof(int8_t)) {
__m256i vacc0 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
__m256i vacc2 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 32)));
__m256i vacc3 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 48)));
input += 64;
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm256_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm256_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc2 = _mm256_slli_epi16(vacc2, 7);
vacc3 = _mm256_slli_epi16(vacc3, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm256_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm256_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm256_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm256_adds_epi16(vacc3, voutput_zero_point);
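    // Pack pairs of int16 vectors down to int8 and fix up the lane order, since the
    // 256-bit pack operates independently on the two 128-bit lanes.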
__m256i vy0 = _mm256_packs_epi16(vacc0, vacc1);
__m256i vy1 = _mm256_packs_epi16(vacc2, vacc3);
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
vy1 = _mm256_permute4x64_epi64(vy1, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
output += 64;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-neon-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__neon_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const int16x8_t vinput_zero_point = vld1q_dup_s16(&params->neon.input_zero_point);
  const int16x8_t vmultiplier = vld1q_dup_s16(&params->neon.multiplier);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
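  // vsubw_s8 widens each int8 element and subtracts it from the input zero point;
  // after a left shift by 7, vqrdmulhq_s16 applies the Q15 multiplier (which carries
  // the compensating sign), and vqaddq_s16/vqmovn_s16 re-add the output zero point
  // and narrow back to int8 with saturation.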
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vacc0), vqmovn_s16(vacc1));
vst1q_s8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-neon-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__neon_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const int16x8_t vinput_zero_point = vld1q_dup_s16(&params->neon.input_zero_point);
  const int16x8_t vmultiplier = vld1q_dup_s16(&params->neon.multiplier);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
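  // Same fixed-point scheme as the x16 variant, processing 32 elements per main-loop
  // iteration as four 8-element halves.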
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
const int8x16_t vx1 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
int16x8_t vacc2 = vsubw_s8(vinput_zero_point, vget_low_s8(vx1));
int16x8_t vacc3 = vsubw_s8(vinput_zero_point, vget_high_s8(vx1));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier);
vacc2 = vqrdmulhq_s16(vacc2, vmultiplier);
vacc3 = vqrdmulhq_s16(vacc3, vmultiplier);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
vacc2 = vqaddq_s16(vacc2, voutput_zero_point);
vacc3 = vqaddq_s16(vacc3, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vacc0), vqmovn_s16(vacc1));
const int8x16_t vy1 = vcombine_s8(vqmovn_s16(vacc2), vqmovn_s16(vacc3));
vst1q_s8(output, vy0); output += 16;
vst1q_s8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}