| repo (stringlengths 1–152, ⌀) | file (stringlengths 14–221) | code (stringlengths 501–25k) | file_length (int64 501–25k) | avg_line_length (float64 20–99.5) | max_line_length (int64 21–134) | extension_type (stringclasses 2) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-multi-switch-avx.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x64_transposec_ukernel__4x4_multi_switch_avx(
const uint64_t* input,
uint64_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(double));
assert(input_stride >= block_width * sizeof(double));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(double);
const size_t tile_wbytes = tile_width * sizeof(double);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
const double* i0 = (const double*) input;
const double* i1 = (const double*) ((uintptr_t) i0 + input_stride);
const double* i2 = (const double*) ((uintptr_t) i1 + input_stride);
const double* i3 = (const double*) ((uintptr_t) i2 + input_stride);
double* o = (double*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
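// rem ^ 3 equals 3 - rem here, so the load below picks a window of the
// shared mask table with rem + 1 active lanes (assuming mask_table holds
// four all-ones entries followed by four zeros); _mm256_maskload_pd then
// zero-fills the lanes past block_width instead of reading them.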
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_offset);
const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
i1 = (double*) ((uintptr_t) i1 + input_offset);
const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
i2 = (double*) ((uintptr_t) i2 + input_offset);
const __m256d v2_3 = _mm256_maskload_pd(i3, vmask);
i3 = (double*) ((uintptr_t) i3 + input_offset);
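// 4x4 transpose of 64-bit lanes in two stages: unpacklo/unpackhi
// interleave element pairs within each 128-bit half, then
// insertf128/permute2f128 exchange the 128-bit halves across registers.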
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
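// Intentional fallthrough: each case stores one transposed output row,
// starting at row rem and stepping oN back by one output_stride, until
// case 0 writes row 0 through o.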
default:
XNN_UNREACHABLE;
case 3: {
const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
_mm256_storeu_pd(oN, v0_3);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
}
case 2: {
const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
_mm256_storeu_pd(oN, v0_2);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
}
case 1: {
const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
_mm256_storeu_pd(oN, v0_1);
}
case 0: {
const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
_mm256_storeu_pd(o, v0_0);
o = (double*) ((uintptr_t) o + tile_hbytes);
}
}
}
if (bh != 0) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
const __m256d v2_3 = _mm256_undefined_pd();
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
__m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
__m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
__m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
__m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
__m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
__m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
__m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
__m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
if (bh & 2) {
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storeu_pd(oN, v0_3_lo);
v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storeu_pd(oN, v0_2_lo);
v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storeu_pd(oN, v0_1_lo);
v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
case 0:
_mm_storeu_pd(o, v0_0_lo);
v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
break;
default:
XNN_UNREACHABLE;
}
o += 2;
}
if (bh & 1) {
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storel_pd(oN, v0_3_lo);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storel_pd(oN, v0_2_lo);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storel_pd(oN, v0_1_lo);
case 0:
_mm_storel_pd(o, v0_0_lo);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const double*) ((uintptr_t) i0 + input_reset);
i1 = (const double*) ((uintptr_t) i0 + input_stride);
i2 = (const double*) ((uintptr_t) i1 + input_stride);
i3 = (const double*) ((uintptr_t) i2 + input_stride);
o = (double*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 6,345 | 35.471264 | 108 | c |
| XNNPACK | XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-reuse-mov-avx.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x64_transposec_ukernel__4x4_reuse_mov_avx(
const uint64_t* input,
uint64_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(double));
assert(input_stride >= block_width * sizeof(double));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(double);
const size_t tile_wbytes = tile_width * sizeof(double);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double) - tile_hbytes;
const double* i0 = (const double*) input;
double* o = (double*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
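// "reuse-mov" variant: a single input pointer i0 is re-stridden after
// every load, and the output cursor o is moved between rows with
// conditional pointer updates (the XNN_UNPREDICTABLE branches below)
// instead of a switch; when block_width leaves a row out of range, o
// stays put and the next store simply overwrites the same location.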
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_1 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_2 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_3 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
o = (double*) ((uintptr_t) o + oN_offset);
_mm256_storeu_pd(o, v0_3);
double *oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm256_storeu_pd(o, v0_2);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm256_storeu_pd(o, v0_1);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm256_storeu_pd(o, v0_0);
}
o = (double*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
const double *i1 = (const double*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
const double *i2 = (const double*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
const __m256d v2_3 = _mm256_undefined_pd();
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
__m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
__m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
__m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
__m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
__m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
__m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
__m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
__m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
if (bh & 2) {
o = (double*) ((uintptr_t) o + oN_stride);
_mm_storeu_pd(o, v0_3_lo);
v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
double *oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storeu_pd(o, v0_2_lo);
v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storeu_pd(o, v0_1_lo);
v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storeu_pd(o, v0_0_lo);
v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
o += 2;
}
if (bh & 1) {
o = (double*) ((uintptr_t) o + oN_stride);
_mm_storel_pd(o, v0_3_lo);
double *oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storel_pd(o, v0_2_lo);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storel_pd(o, v0_1_lo);
oN = (double*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storel_pd(o, v0_0_lo);
}
}
i0 = (const double*) ((uintptr_t) i0 + input_reset);
o = (double*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 6,337 | 36.282353 | 122 | c |
| XNNPACK | XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-reuse-multi-avx.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x64_transposec_ukernel__4x4_reuse_multi_avx(
const uint64_t* input,
uint64_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(double));
assert(input_stride >= block_width * sizeof(double));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(double);
const size_t tile_wbytes = tile_width * sizeof(double);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
const double* i0 = (const double*) input;
double* o0 = (double*) output;
do {
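// "reuse-multi" variant: four independent output row pointers o1..o3
// (set up below) collapse onto o0 when block_width leaves fewer than
// four valid columns, so out-of-range rows are harmlessly overwritten
// and the store sequence needs no per-iteration branching.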
double* o1 = (double*) (block_width < 2 ? o0 : (double*) ((uintptr_t) o0 + output_stride));
double* o2 = (double*) (block_width <= 2 ? o0 : (double*) ((uintptr_t) o1 + output_stride));
double* o3 = (double*) (block_width < 4 ? o0 : (double*) ((uintptr_t) o2 + output_stride));
const size_t rem = min(block_width - 1, 3);
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_1 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_2 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_3 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
_mm256_storeu_pd(o3, v0_3);
o3 = (double*) ((uintptr_t) o3 + tile_hbytes);
_mm256_storeu_pd(o2, v0_2);
o2 = (double*) ((uintptr_t) o2 + tile_hbytes);
_mm256_storeu_pd(o1, v0_1);
o1 = (double*) ((uintptr_t) o1 + tile_hbytes);
_mm256_storeu_pd(o0, v0_0);
o0 = (double*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
const double *i1 = (const double*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
const double *i2 = (const double*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
const __m256d v2_3 = _mm256_undefined_pd();
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
__m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
__m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
__m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
__m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
__m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
__m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
__m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
__m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
if (bh & 2) {
_mm_storeu_pd(o3, v0_3_lo);
v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
o3 += 2;
_mm_storeu_pd(o2, v0_2_lo);
v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
o2 += 2;
_mm_storeu_pd(o1, v0_1_lo);
v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
o1 += 2;
_mm_storeu_pd(o0, v0_0_lo);
v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
o0 += 2;
}
if (bh & 1) {
_mm_storel_pd(o3, v0_3_lo);
_mm_storel_pd(o2, v0_2_lo);
_mm_storel_pd(o1, v0_1_lo);
_mm_storel_pd(o0, v0_0_lo);
}
}
i0 = (const double*) ((uintptr_t) i0 + input_reset);
o0 = (double*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,263 | 37.423358 | 108 | c |
| XNNPACK | XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-reuse-switch-avx.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x64_transposec_ukernel__4x4_reuse_switch_avx(
const uint64_t* input,
uint64_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(double));
assert(input_stride >= block_width * sizeof(double));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(double);
const size_t tile_wbytes = tile_width * sizeof(double);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
const double* i0 = (const double*) input;
double* o = (double*) output;
const size_t minus_output_stride = -output_stride;
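// "reuse-switch" variant: one input pointer re-stridden across rows,
// with remainder columns dispatched through the fallthrough switch on
// rem below (one store per valid output row).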
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_1 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_2 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v2_3 = _mm256_maskload_pd(i0, vmask);
i0 = (double*) ((uintptr_t) i0 + input_stride);
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
default:
XNN_UNREACHABLE;
case 3: {
const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
_mm256_storeu_pd(oN, v0_3);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
}
case 2: {
const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
_mm256_storeu_pd(oN, v0_2);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
}
case 1: {
const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
_mm256_storeu_pd(oN, v0_1);
}
case 0: {
const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
_mm256_storeu_pd(o, v0_0);
o = (double*) ((uintptr_t) o + tile_hbytes);
}
}
}
if (bh != 0) {
const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
const double *i1 = (const double*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
const double *i2 = (const double*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
const __m256d v2_3 = _mm256_undefined_pd();
const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
__m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
__m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
__m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
__m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
__m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
__m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
__m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
__m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
if (bh & 2) {
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storeu_pd(oN, v0_3_lo);
v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storeu_pd(oN, v0_2_lo);
v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storeu_pd(oN, v0_1_lo);
v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
case 0:
_mm_storeu_pd(o, v0_0_lo);
v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
break;
default:
XNN_UNREACHABLE;
}
o += 2;
}
if (bh & 1) {
double* oN = (double*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storel_pd(oN, v0_3_lo);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storel_pd(oN, v0_2_lo);
oN = (double*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storel_pd(oN, v0_1_lo);
case 0:
_mm_storel_pd(o, v0_0_lo);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const double*) ((uintptr_t) i0 + input_reset);
o = (double*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 6,051 | 34.810651 | 108 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-aarch64-neon-tbx128x4-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/neon-tbx128x4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint8x16x4_t vtable0123 = vld1q_u8_x4(table);
const uint8x16x4_t vtable4567 = vld1q_u8_x4(table + 64);
const uint8x16x4_t vtable89AB = vld1q_u8_x4(table + 128);
const uint8x16x4_t vtableCDEF = vld1q_u8_x4(table + 192);
const uint8x16_t voffset = vmovq_n_u8(64);
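// 256-entry LUT in four 64-byte quarters: vqtbl4q_u8 resolves indices
// 0..63 and yields zero for the rest; after each 64-byte rebase
// (vsubq_u8), vqtbx4q_u8 fills in only the lanes whose index falls in
// the next quarter, leaving already-resolved lanes unchanged.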
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
uint8x16_t vx = vld1q_u8(input); input += 16;
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
vst1q_u8(output, vy); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
uint8x16_t vx = vld1q_u8(input);
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
uint8x8_t vy_lo = vget_low_u8(vy);
if (batch & (8 * sizeof(uint8_t))) {
vst1_u8(output, vy_lo); output += 8;
vy_lo = vget_high_u8(vy);
}
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy_lo), 0); output += 4;
vy_lo = vext_u8(vy_lo, vy_lo, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy_lo), 0); output += 2;
vy_lo = vext_u8(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy_lo, 0);
}
}
}
| 2,354 | 27.373494 | 80 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-aarch64-neon-tbx128x4-x32.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/neon-tbx128x4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint8x16x4_t vtable0123 = vld1q_u8_x4(table);
const uint8x16x4_t vtable4567 = vld1q_u8_x4(table + 64);
const uint8x16x4_t vtable89AB = vld1q_u8_x4(table + 128);
const uint8x16x4_t vtableCDEF = vld1q_u8_x4(table + 192);
const uint8x16_t voffset = vmovq_n_u8(64);
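// Same four-quarter lookup as the x16 kernel, unrolled to two 16-byte
// vectors per iteration so the two independent TBL/TBX chains can
// overlap; leftovers fall through to the single-vector loop and the
// partial-store tail below.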
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
uint8x16_t vx0 = vld1q_u8(input); input += 16;
uint8x16_t vx1 = vld1q_u8(input); input += 16;
uint8x16_t vy0 = vqtbl4q_u8(vtable0123, vx0);
vx0 = vsubq_u8(vx0, voffset);
uint8x16_t vy1 = vqtbl4q_u8(vtable0123, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy0 = vqtbx4q_u8(vy0, vtable4567, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable4567, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy0 = vqtbx4q_u8(vy0, vtable89AB, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable89AB, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy0 = vqtbx4q_u8(vy0, vtableCDEF, vx0);
vy1 = vqtbx4q_u8(vy1, vtableCDEF, vx1);
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
uint8x16_t vx = vld1q_u8(input); input += 16;
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
vst1q_u8(output, vy); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
uint8x16_t vx = vld1q_u8(input);
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
uint8x8_t vy_lo = vget_low_u8(vy);
if (batch & (8 * sizeof(uint8_t))) {
vst1_u8(output, vy_lo); output += 8;
vy_lo = vget_high_u8(vy);
}
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy_lo), 0); output += 4;
vy_lo = vext_u8(vy_lo, vy_lo, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy_lo), 0); output += 2;
vy_lo = vext_u8(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy_lo, 0);
}
}
}
| 3,188 | 28.527778 | 80 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-aarch64-neon-tbx128x4-x48.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/neon-tbx128x4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x48(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint8x16x4_t vtable0123 = vld1q_u8_x4(table);
const uint8x16x4_t vtable4567 = vld1q_u8_x4(table + 64);
const uint8x16x4_t vtable89AB = vld1q_u8_x4(table + 128);
const uint8x16x4_t vtableCDEF = vld1q_u8_x4(table + 192);
const uint8x16_t voffset = vmovq_n_u8(64);
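// Three-vector unrolled variant of the same four-quarter TBL/TBX
// lookup; remainders drop to the single-vector loop and the
// partial-store tail.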
for (; batch >= 48 * sizeof(uint8_t); batch -= 48 * sizeof(uint8_t)) {
uint8x16_t vx0 = vld1q_u8(input); input += 16;
uint8x16_t vx1 = vld1q_u8(input); input += 16;
uint8x16_t vx2 = vld1q_u8(input); input += 16;
uint8x16_t vy0 = vqtbl4q_u8(vtable0123, vx0);
vx0 = vsubq_u8(vx0, voffset);
uint8x16_t vy1 = vqtbl4q_u8(vtable0123, vx1);
vx1 = vsubq_u8(vx1, voffset);
uint8x16_t vy2 = vqtbl4q_u8(vtable0123, vx2);
vx2 = vsubq_u8(vx2, voffset);
vy0 = vqtbx4q_u8(vy0, vtable4567, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable4567, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy2 = vqtbx4q_u8(vy2, vtable4567, vx2);
vx2 = vsubq_u8(vx2, voffset);
vy0 = vqtbx4q_u8(vy0, vtable89AB, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable89AB, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy2 = vqtbx4q_u8(vy2, vtable89AB, vx2);
vx2 = vsubq_u8(vx2, voffset);
vy0 = vqtbx4q_u8(vy0, vtableCDEF, vx0);
vy1 = vqtbx4q_u8(vy1, vtableCDEF, vx1);
vy2 = vqtbx4q_u8(vy2, vtableCDEF, vx2);
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
vst1q_u8(output, vy2); output += 16;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
uint8x16_t vx = vld1q_u8(input); input += 16;
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
vst1q_u8(output, vy); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
uint8x16_t vx = vld1q_u8(input);
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
uint8x8_t vy_lo = vget_low_u8(vy);
if (batch & (8 * sizeof(uint8_t))) {
vst1_u8(output, vy_lo); output += 8;
vy_lo = vget_high_u8(vy);
}
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy_lo), 0); output += 4;
vy_lo = vext_u8(vy_lo, vy_lo, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy_lo), 0); output += 2;
vy_lo = vext_u8(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy_lo, 0);
}
}
}
| 3,564 | 29.470085 | 80 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-aarch64-neon-tbx128x4-x64.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/neon-tbx128x4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint8x16x4_t vtable0123 = vld1q_u8_x4(table);
const uint8x16x4_t vtable4567 = vld1q_u8_x4(table + 64);
const uint8x16x4_t vtable89AB = vld1q_u8_x4(table + 128);
const uint8x16x4_t vtableCDEF = vld1q_u8_x4(table + 192);
const uint8x16_t voffset = vmovq_n_u8(64);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
uint8x16_t vx0 = vld1q_u8(input); input += 16;
uint8x16_t vx1 = vld1q_u8(input); input += 16;
uint8x16_t vx2 = vld1q_u8(input); input += 16;
uint8x16_t vx3 = vld1q_u8(input); input += 16;
uint8x16_t vy0 = vqtbl4q_u8(vtable0123, vx0);
vx0 = vsubq_u8(vx0, voffset);
uint8x16_t vy1 = vqtbl4q_u8(vtable0123, vx1);
vx1 = vsubq_u8(vx1, voffset);
uint8x16_t vy2 = vqtbl4q_u8(vtable0123, vx2);
vx2 = vsubq_u8(vx2, voffset);
uint8x16_t vy3 = vqtbl4q_u8(vtable0123, vx3);
vx3 = vsubq_u8(vx3, voffset);
vy0 = vqtbx4q_u8(vy0, vtable4567, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable4567, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy2 = vqtbx4q_u8(vy2, vtable4567, vx2);
vx2 = vsubq_u8(vx2, voffset);
vy3 = vqtbx4q_u8(vy3, vtable4567, vx3);
vx3 = vsubq_u8(vx3, voffset);
vy0 = vqtbx4q_u8(vy0, vtable89AB, vx0);
vx0 = vsubq_u8(vx0, voffset);
vy1 = vqtbx4q_u8(vy1, vtable89AB, vx1);
vx1 = vsubq_u8(vx1, voffset);
vy2 = vqtbx4q_u8(vy2, vtable89AB, vx2);
vx2 = vsubq_u8(vx2, voffset);
vy3 = vqtbx4q_u8(vy3, vtable89AB, vx3);
vx3 = vsubq_u8(vx3, voffset);
vy0 = vqtbx4q_u8(vy0, vtableCDEF, vx0);
vy1 = vqtbx4q_u8(vy1, vtableCDEF, vx1);
vy2 = vqtbx4q_u8(vy2, vtableCDEF, vx2);
vy3 = vqtbx4q_u8(vy3, vtableCDEF, vx3);
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
vst1q_u8(output, vy2); output += 16;
vst1q_u8(output, vy3); output += 16;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
uint8x16_t vx = vld1q_u8(input); input += 16;
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
vst1q_u8(output, vy); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
uint8x16_t vx = vld1q_u8(input);
uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable4567, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtable89AB, vx);
vx = vsubq_u8(vx, voffset);
vy = vqtbx4q_u8(vy, vtableCDEF, vx);
uint8x8_t vy_lo = vget_low_u8(vy);
if (batch & (8 * sizeof(uint8_t))) {
vst1_u8(output, vy_lo); output += 8;
vy_lo = vget_high_u8(vy);
}
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy_lo), 0); output += 4;
vy_lo = vext_u8(vy_lo, vy_lo, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy_lo), 0); output += 2;
vy_lo = vext_u8(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy_lo, 0);
}
}
}
| 3,940 | 30.277778 | 80 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__avx_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
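// The 16 table rows are re-encoded as XOR differences (rows 8..F are
// additionally folded with rows 0..7). In the lookup loop, PSHUFB
// zeroes lanes whose shifted index has bit 7 set, and partial results
// are combined with XOR, so each lane's chain telescopes to exactly
// table[index]; switching to saturating subtraction after row 8 keeps
// exhausted indices negative so they stop contributing.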
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 6,625 | 39.650307 | 74 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx-x32.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__avx_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
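// Two-vector unrolled variant of the same XOR-telescoped PSHUFB lookup;
// sub-32-byte remainders reuse the single-vector path and the
// progressively narrowing stores in the tail.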
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input);
__m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
__m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
__m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 10,079 | 41.531646 | 74 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx-x48.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__avx_x48(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
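// Three-vector unrolled variant of the XOR-telescoped PSHUFB lookup.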
for (; batch >= 48 * sizeof(uint8_t); batch -= 48 * sizeof(uint8_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input);
__m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
__m128i vx2 = _mm_loadu_si128((const __m128i*) (input + 32));
input += 48;
__m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
__m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);
__m128i vy2 = _mm_shuffle_epi8(vtable0, vx2);
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable1, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable2, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable3, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable4, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable5, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable6, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable7, vx2));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable8, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable9, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableA, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableB, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableC, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableD, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableE, vx2));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableF, vx2));
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
_mm_storeu_si128((__m128i*) (output + 32), vy2);
output += 48;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
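    /* Write the 1..15 remaining bytes with a power-of-two store cascade,
     * shifting already-stored lanes out of vy after each partial store. */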
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| file_length: 11,755 | avg_line_length: 42.540741 | max_line_length: 74 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-avx-x64.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__avx_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
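  /* Fold the 256-entry LUT into 16 XOR-differenced 16-byte tables. PSHUFB
   * yields tbl[idx & 15] when bit 7 of the index byte is clear and 0 when it
   * is set. For an input x = 16*K + r the rounds below see running indices
   * x, x-16, x-32, ...; only the rounds with an index in 0..127 contribute,
   * and XORing their lookups telescopes to vt<K>[r] == table[x] (for
   * x >= 128 the vtable0..vtable7 terms folded into vtable8..vtableF cancel
   * the leading prefix). */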
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
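  /* Main loop: 64 bytes per iteration, processed as four independent
   * 16-byte vectors so the per-round shuffle/XOR chains can overlap. */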
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input);
__m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
__m128i vx2 = _mm_loadu_si128((const __m128i*) (input + 32));
__m128i vx3 = _mm_loadu_si128((const __m128i*) (input + 48));
input += 64;
__m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
__m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);
__m128i vy2 = _mm_shuffle_epi8(vtable0, vx2);
__m128i vy3 = _mm_shuffle_epi8(vtable0, vx3);
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable1, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable1, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable2, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable2, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable3, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable3, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable4, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable4, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable5, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable5, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable6, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable6, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable7, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable7, vx3));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vx2 = _mm_sub_epi8(vx2, voffset);
vx3 = _mm_sub_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable8, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable8, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable9, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtable9, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableA, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableA, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableB, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableB, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableC, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableC, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableD, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableD, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableE, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableE, vx3));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vx2 = _mm_subs_epi8(vx2, voffset);
vx3 = _mm_subs_epi8(vx3, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));
vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableF, vx2));
vy3 = _mm_xor_si128(vy3, _mm_shuffle_epi8(vtableF, vx3));
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
_mm_storeu_si128((__m128i*) (output + 32), vy2);
_mm_storeu_si128((__m128i*) (output + 48), vy3);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
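    /* Note: the tail performs a full 16-byte load even though fewer than
     * 16 bytes remain; the cascade below stores only the valid bytes. */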
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| file_length: 13,431 | avg_line_length: 43.330033 | max_line_length: 74 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-avx2-x128.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx2_x128(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
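  /* VPSHUFB shuffles within each 128-bit lane independently, so every
   * 16-byte table slice is broadcast into both lanes of a 256-bit register. */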
const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) table));
const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 16)));
const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 32)));
const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 48)));
const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 64)));
const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 80)));
const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 96)));
const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 112)));
const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 128)));
const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 144)));
const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 160)));
const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 176)));
const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 192)));
const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 208)));
const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 224)));
const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 240)));
const __m256i vtable0 = vt0;
const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);
const __m256i voffset = _mm256_set1_epi8(16);
for (; batch >= 128 * sizeof(uint8_t); batch -= 128 * sizeof(uint8_t)) {
__m256i vx0 = _mm256_loadu_si256((const __m256i*) input);
__m256i vx1 = _mm256_loadu_si256((const __m256i*) (input + 32));
__m256i vx2 = _mm256_loadu_si256((const __m256i*) (input + 64));
__m256i vx3 = _mm256_loadu_si256((const __m256i*) (input + 96));
input += 128;
__m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
__m256i vy1 = _mm256_shuffle_epi8(vtable0, vx1);
__m256i vy2 = _mm256_shuffle_epi8(vtable0, vx2);
__m256i vy3 = _mm256_shuffle_epi8(vtable0, vx3);
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable1, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable1, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable1, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable2, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable2, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable2, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable3, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable3, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable3, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable4, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable4, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable4, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable5, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable5, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable5, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable6, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable6, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable6, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable7, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable7, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable7, vx3));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vx3 = _mm256_sub_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable8, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable8, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable8, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable9, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable9, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable9, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableA, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableA, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableA, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableB, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableB, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableB, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableC, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableC, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableC, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableD, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableD, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableD, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableE, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableE, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableE, vx3));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vx3 = _mm256_subs_epi8(vx3, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableF, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableF, vx2));
vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableF, vx3));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
_mm256_storeu_si256((__m256i*) (output + 64), vy2);
_mm256_storeu_si256((__m256i*) (output + 96), vy3);
output += 128;
}
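  /* The 16-byte tail loops reuse the low 128-bit lane of the broadcast
   * tables via _mm256_castsi256_si128 (a free cast, no instruction). */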
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| file_length: 16,036 | avg_line_length: 51.927393 | max_line_length: 98 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-avx2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx2_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) table));
const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 16)));
const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 32)));
const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 48)));
const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 64)));
const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 80)));
const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 96)));
const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 112)));
const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 128)));
const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 144)));
const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 160)));
const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 176)));
const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 192)));
const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 208)));
const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 224)));
const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 240)));
const __m256i vtable0 = vt0;
const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);
const __m256i voffset = _mm256_set1_epi8(16);
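  /* Scalar reference for one byte: output[i] = table[input[i]]. The rounds
   * below reproduce this lookup with the telescoped XOR tables, 32 bytes
   * at a time. */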
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m256i vx0 = _mm256_loadu_si256((const __m256i*) input);
input += 32;
__m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
vx0 = _mm256_sub_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
vx0 = _mm256_subs_epi8(vx0, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
_mm256_storeu_si256((__m256i*) output, vy0);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| file_length: 10,571 | avg_line_length: 50.823529 | max_line_length: 98 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-avx2-x64.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx2_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) table));
const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 16)));
const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 32)));
const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 48)));
const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 64)));
const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 80)));
const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 96)));
const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 112)));
const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 128)));
const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 144)));
const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 160)));
const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 176)));
const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 192)));
const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 208)));
const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 224)));
const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 240)));
const __m256i vtable0 = vt0;
const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);
const __m256i voffset = _mm256_set1_epi8(16);
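  /* Same telescoped-LUT rounds as the other avx2 variants, unrolled to two
   * 32-byte vectors (64 bytes) per main-loop iteration. */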
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m256i vx0 = _mm256_loadu_si256((const __m256i*) input);
__m256i vx1 = _mm256_loadu_si256((const __m256i*) (input + 32));
input += 64;
__m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
__m256i vy1 = _mm256_shuffle_epi8(vtable0, vx1);
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable1, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable2, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable3, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable4, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable5, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable6, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable7, vx1));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable8, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable9, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableA, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableB, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableC, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableD, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableE, vx1));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableF, vx1));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| file_length: 12,391 | avg_line_length: 51.28692 | max_line_length: 98 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-avx2-x96.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx2_x96(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) table));
const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 16)));
const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 32)));
const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 48)));
const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 64)));
const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 80)));
const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 96)));
const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 112)));
const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 128)));
const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 144)));
const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 160)));
const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 176)));
const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 192)));
const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 208)));
const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 224)));
const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (table + 240)));
const __m256i vtable0 = vt0;
const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);
const __m256i voffset = _mm256_set1_epi8(16);
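  /* 96-byte main loop: three 32-byte vectors per iteration. */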
for (; batch >= 96 * sizeof(uint8_t); batch -= 96 * sizeof(uint8_t)) {
__m256i vx0 = _mm256_loadu_si256((const __m256i*) input);
__m256i vx1 = _mm256_loadu_si256((const __m256i*) (input + 32));
__m256i vx2 = _mm256_loadu_si256((const __m256i*) (input + 64));
input += 96;
__m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
__m256i vy1 = _mm256_shuffle_epi8(vtable0, vx1);
__m256i vy2 = _mm256_shuffle_epi8(vtable0, vx2);
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable1, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable1, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable2, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable2, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable3, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable3, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable4, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable4, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable5, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable5, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable6, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable6, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable7, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable7, vx2));
vx0 = _mm256_sub_epi8(vx0, voffset);
vx1 = _mm256_sub_epi8(vx1, voffset);
vx2 = _mm256_sub_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable8, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable8, vx2));
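    // From the 9th offset onward, use saturating subtraction: indices that
    // have already gone negative stay pinned at -128, so VPSHUFB keeps
    // treating them as out-of-range instead of wrapping back to positive.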
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable9, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable9, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableA, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableA, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableB, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableB, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableC, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableC, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableD, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableD, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableE, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableE, vx2));
vx0 = _mm256_subs_epi8(vx0, voffset);
vx1 = _mm256_subs_epi8(vx1, voffset);
vx2 = _mm256_subs_epi8(vx2, voffset);
vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableF, vx1));
vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableF, vx2));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
_mm256_storeu_si256((__m256i*) (output + 64), vy2);
output += 96;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
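    // Remainder of 1-15 bytes: a full 16-byte vector is loaded (the read may
    // extend past the last input byte), but only `batch` bytes are stored.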
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));
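    // Store the remaining 1-15 bytes with progressively narrower stores,
    // shifting consumed lanes out of vy between steps.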
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 14,211 | 51.637037 | 98 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512skx-vpshufb-x128.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512skx-vpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x128(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
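  // Same XOR-folded 16-chunk LUT scheme as the AVX2 kernels, widened to
  // 512 bits: VPSHUFB still shuffles within each 16-byte lane, so every
  // chunk is broadcast to all four 128-bit lanes of a ZMM register.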
const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) table));
const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 16)));
const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 32)));
const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 48)));
const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 64)));
const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 80)));
const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 96)));
const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 112)));
const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 128)));
const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 144)));
const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 160)));
const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 176)));
const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 192)));
const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 208)));
const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 224)));
const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 240)));
const __m512i vtable0 = vt0;
const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);
const __m512i voffset = _mm512_set1_epi8(16);
for (; batch >= 128 * sizeof(uint8_t); batch -= 128 * sizeof(uint8_t)) {
__m512i vx0 = _mm512_loadu_si512(input);
__m512i vx1 = _mm512_loadu_si512(input + 64);
input += 128;
__m512i vy0 = _mm512_shuffle_epi8(vtable0, vx0);
__m512i vy1 = _mm512_shuffle_epi8(vtable0, vx1);
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable1, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable1, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable2, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable2, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable3, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable3, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable4, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable4, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable5, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable5, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable6, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable6, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable7, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable7, vx1));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable8, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable8, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable9, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable9, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableA, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableA, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableB, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableB, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableC, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableC, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableD, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableD, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableE, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableE, vx1));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableF, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableF, vx1));
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
output += 128;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
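    // Handle the final 1-63 bytes with a bitmask of the valid lanes; the
    // masked load and store touch only the first `batch` bytes.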
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 10,692 | 46.950673 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512skx-vpshufb-x192.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512skx-vpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x192(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
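  // Identical XOR-folded LUT scheme as the x128 variant; only the unroll
  // factor of the main loop differs.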
const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) table));
const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 16)));
const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 32)));
const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 48)));
const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 64)));
const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 80)));
const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 96)));
const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 112)));
const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 128)));
const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 144)));
const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 160)));
const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 176)));
const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 192)));
const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 208)));
const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 224)));
const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 240)));
const __m512i vtable0 = vt0;
const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);
const __m512i voffset = _mm512_set1_epi8(16);
for (; batch >= 192 * sizeof(uint8_t); batch -= 192 * sizeof(uint8_t)) {
__m512i vx0 = _mm512_loadu_si512(input);
__m512i vx1 = _mm512_loadu_si512(input + 64);
__m512i vx2 = _mm512_loadu_si512(input + 128);
input += 192;
__m512i vy0 = _mm512_shuffle_epi8(vtable0, vx0);
__m512i vy1 = _mm512_shuffle_epi8(vtable0, vx1);
__m512i vy2 = _mm512_shuffle_epi8(vtable0, vx2);
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable1, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable1, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable1, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable2, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable2, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable2, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable3, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable3, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable3, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable4, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable4, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable4, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable5, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable5, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable5, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable6, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable6, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable6, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable7, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable7, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable7, vx2));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable8, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable8, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable8, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable9, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable9, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable9, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableA, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableA, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableA, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableB, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableB, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableB, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableC, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableC, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableC, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableD, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableD, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableD, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableE, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableE, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableE, vx2));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableF, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableF, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableF, vx2));
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
_mm512_storeu_si512(output + 128, vy2);
output += 192;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 12,482 | 47.761719 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512skx-vpshufb-x256.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512skx-vpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x256(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
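  // Identical XOR-folded LUT scheme as the x128 variant, unrolled to process
  // 256 bytes per main-loop iteration.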
const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) table));
const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 16)));
const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 32)));
const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 48)));
const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 64)));
const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 80)));
const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 96)));
const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 112)));
const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 128)));
const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 144)));
const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 160)));
const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 176)));
const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 192)));
const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 208)));
const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 224)));
const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 240)));
const __m512i vtable0 = vt0;
const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);
const __m512i voffset = _mm512_set1_epi8(16);
for (; batch >= 256 * sizeof(uint8_t); batch -= 256 * sizeof(uint8_t)) {
__m512i vx0 = _mm512_loadu_si512(input);
__m512i vx1 = _mm512_loadu_si512(input + 64);
__m512i vx2 = _mm512_loadu_si512(input + 128);
__m512i vx3 = _mm512_loadu_si512(input + 192);
input += 256;
__m512i vy0 = _mm512_shuffle_epi8(vtable0, vx0);
__m512i vy1 = _mm512_shuffle_epi8(vtable0, vx1);
__m512i vy2 = _mm512_shuffle_epi8(vtable0, vx2);
__m512i vy3 = _mm512_shuffle_epi8(vtable0, vx3);
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable1, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable1, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable1, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable1, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable2, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable2, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable2, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable2, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable3, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable3, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable3, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable3, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable4, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable4, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable4, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable4, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable5, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable5, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable5, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable5, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable6, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable6, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable6, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable6, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable7, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable7, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable7, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable7, vx3));
vx0 = _mm512_sub_epi8(vx0, voffset);
vx1 = _mm512_sub_epi8(vx1, voffset);
vx2 = _mm512_sub_epi8(vx2, voffset);
vx3 = _mm512_sub_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable8, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable8, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable8, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable8, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable9, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable9, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable9, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable9, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableA, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableA, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableA, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableA, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableB, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableB, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableB, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableB, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableC, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableC, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableC, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableC, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableD, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableD, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableD, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableD, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableE, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableE, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableE, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableE, vx3));
vx0 = _mm512_subs_epi8(vx0, voffset);
vx1 = _mm512_subs_epi8(vx1, voffset);
vx2 = _mm512_subs_epi8(vx2, voffset);
vx3 = _mm512_subs_epi8(vx3, voffset);
vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableF, vx0));
vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableF, vx1));
vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableF, vx2));
vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableF, vx3));
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
_mm512_storeu_si512(output + 128, vy2);
_mm512_storeu_si512(output + 192, vy3);
output += 256;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 14,272 | 48.387543 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512skx-vpshufb-x64.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512skx-vpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
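  // Identical XOR-folded LUT scheme as the wider variants, processing a
  // single 64-byte vector per main-loop iteration.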
const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) table));
const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 16)));
const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 32)));
const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 48)));
const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 64)));
const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 80)));
const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 96)));
const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 112)));
const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 128)));
const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 144)));
const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 160)));
const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 176)));
const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 192)));
const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 208)));
const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 224)));
const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (table + 240)));
const __m512i vtable0 = vt0;
const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);
const __m512i voffset = _mm512_set1_epi8(16);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_shuffle_epi8(vtable0, vx);
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
vx = _mm512_sub_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
vx = _mm512_subs_epi8(vx, voffset);
vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 7,005 | 46.020134 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512vbmi-vpermx2b-x128.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512vbmi-vpermx2b.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x128(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
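  // With AVX512-VBMI the full 256-byte LUT fits in four ZMM registers.
  // VPERMX2B indexes a 128-byte window (two registers) using the low 7 bits
  // of each index byte, so one lookup covers table[0:128) and a second
  // covers table[128:256); the sign bit of the input byte selects between
  // them.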
const __m512i vtable0 = _mm512_loadu_si512(table);
const __m512i vtable1 = _mm512_loadu_si512(table + 64);
const __m512i vtable2 = _mm512_loadu_si512(table + 128);
const __m512i vtable3 = _mm512_loadu_si512(table + 192);
for (; batch >= 128 * sizeof(uint8_t); batch -= 128 * sizeof(uint8_t)) {
const __m512i vx0 = _mm512_loadu_si512(input);
const __m512i vx1 = _mm512_loadu_si512(input + 64);
input += 128;
__m512i vy0 = _mm512_permutex2var_epi8(vtable0, vx0, vtable1);
const __mmask64 vm0 = _mm512_movepi8_mask(vx0);
__m512i vy1 = _mm512_permutex2var_epi8(vtable0, vx1, vtable1);
const __mmask64 vm1 = _mm512_movepi8_mask(vx1);
const __m512i vt0 = _mm512_permutex2var_epi8(vtable2, vx0, vtable3);
const __m512i vt1 = _mm512_permutex2var_epi8(vtable2, vx1, vtable3);
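    // Blend in the high-half lookups for bytes whose sign bit is set, i.e.
    // inputs in [128, 256).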
vy0 = _mm512_mask_mov_epi8(vy0, vm0, vt0);
vy1 = _mm512_mask_mov_epi8(vy1, vm1, vt1);
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
output += 128;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_permutex2var_epi8(vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_permutex2var_epi8(vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_maskz_permutex2var_epi8(vmask, vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_maskz_permutex2var_epi8(vmask, vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 2,673 | 32.012346 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512vbmi-vpermx2b-x192.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512vbmi-vpermx2b.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x192(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
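  // Same two-window VPERMX2B scheme as the x128 variant, unrolled to 192
  // bytes per main-loop iteration.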
const __m512i vtable0 = _mm512_loadu_si512(table);
const __m512i vtable1 = _mm512_loadu_si512(table + 64);
const __m512i vtable2 = _mm512_loadu_si512(table + 128);
const __m512i vtable3 = _mm512_loadu_si512(table + 192);
for (; batch >= 192 * sizeof(uint8_t); batch -= 192 * sizeof(uint8_t)) {
const __m512i vx0 = _mm512_loadu_si512(input);
const __m512i vx1 = _mm512_loadu_si512(input + 64);
const __m512i vx2 = _mm512_loadu_si512(input + 128);
input += 192;
__m512i vy0 = _mm512_permutex2var_epi8(vtable0, vx0, vtable1);
const __mmask64 vm0 = _mm512_movepi8_mask(vx0);
__m512i vy1 = _mm512_permutex2var_epi8(vtable0, vx1, vtable1);
const __mmask64 vm1 = _mm512_movepi8_mask(vx1);
__m512i vy2 = _mm512_permutex2var_epi8(vtable0, vx2, vtable1);
const __mmask64 vm2 = _mm512_movepi8_mask(vx2);
const __m512i vt0 = _mm512_permutex2var_epi8(vtable2, vx0, vtable3);
const __m512i vt1 = _mm512_permutex2var_epi8(vtable2, vx1, vtable3);
const __m512i vt2 = _mm512_permutex2var_epi8(vtable2, vx2, vtable3);
vy0 = _mm512_mask_mov_epi8(vy0, vm0, vt0);
vy1 = _mm512_mask_mov_epi8(vy1, vm1, vt1);
vy2 = _mm512_mask_mov_epi8(vy2, vm2, vt2);
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
_mm512_storeu_si512(output + 128, vy2);
output += 192;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_permutex2var_epi8(vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_permutex2var_epi8(vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_maskz_permutex2var_epi8(vmask, vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_maskz_permutex2var_epi8(vmask, vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 3,013 | 33.643678 | 94 | c |
| XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512vbmi-vpermx2b-x256.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512vbmi-vpermx2b.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x256(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vtable0 = _mm512_loadu_si512(table);
const __m512i vtable1 = _mm512_loadu_si512(table + 64);
const __m512i vtable2 = _mm512_loadu_si512(table + 128);
const __m512i vtable3 = _mm512_loadu_si512(table + 192);
for (; batch >= 256 * sizeof(uint8_t); batch -= 256 * sizeof(uint8_t)) {
const __m512i vx0 = _mm512_loadu_si512(input);
const __m512i vx1 = _mm512_loadu_si512(input + 64);
const __m512i vx2 = _mm512_loadu_si512(input + 128);
const __m512i vx3 = _mm512_loadu_si512(input + 192);
input += 256;
__m512i vy0 = _mm512_permutex2var_epi8(vtable0, vx0, vtable1);
const __mmask64 vm0 = _mm512_movepi8_mask(vx0);
__m512i vy1 = _mm512_permutex2var_epi8(vtable0, vx1, vtable1);
const __mmask64 vm1 = _mm512_movepi8_mask(vx1);
__m512i vy2 = _mm512_permutex2var_epi8(vtable0, vx2, vtable1);
const __mmask64 vm2 = _mm512_movepi8_mask(vx2);
__m512i vy3 = _mm512_permutex2var_epi8(vtable0, vx3, vtable1);
const __mmask64 vm3 = _mm512_movepi8_mask(vx3);
const __m512i vt0 = _mm512_permutex2var_epi8(vtable2, vx0, vtable3);
const __m512i vt1 = _mm512_permutex2var_epi8(vtable2, vx1, vtable3);
const __m512i vt2 = _mm512_permutex2var_epi8(vtable2, vx2, vtable3);
const __m512i vt3 = _mm512_permutex2var_epi8(vtable2, vx3, vtable3);
vy0 = _mm512_mask_mov_epi8(vy0, vm0, vt0);
vy1 = _mm512_mask_mov_epi8(vy1, vm1, vt1);
vy2 = _mm512_mask_mov_epi8(vy2, vm2, vt2);
vy3 = _mm512_mask_mov_epi8(vy3, vm3, vt3);
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
_mm512_storeu_si512(output + 128, vy2);
_mm512_storeu_si512(output + 192, vy3);
output += 256;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_permutex2var_epi8(vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_permutex2var_epi8(vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_maskz_permutex2var_epi8(vmask, vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_maskz_permutex2var_epi8(vmask, vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 3,353 | 35.064516 | 94 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-avx512vbmi-vpermx2b-x64.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/avx512vbmi-vpermx2b.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vtable0 = _mm512_loadu_si512(table);
const __m512i vtable1 = _mm512_loadu_si512(table + 64);
const __m512i vtable2 = _mm512_loadu_si512(table + 128);
const __m512i vtable3 = _mm512_loadu_si512(table + 192);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_permutex2var_epi8(vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_permutex2var_epi8(vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_maskz_permutex2var_epi8(vmask, vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_maskz_permutex2var_epi8(vmask, vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 1,886 | 29.934426 | 94 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__scalar_x1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
do {
const size_t vx = (size_t) *input++;
const uint32_t vt = (uint32_t) table[vx];
*output++ = (uint8_t) vt;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 801 | 22.588235 | 72 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-scalar-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__scalar_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const size_t vx0 = (size_t) input[0];
const size_t vx1 = (size_t) input[1];
const size_t vx2 = (size_t) input[2];
const size_t vx3 = (size_t) input[3];
const size_t vx4 = (size_t) input[4];
const size_t vx5 = (size_t) input[5];
const size_t vx6 = (size_t) input[6];
const size_t vx7 = (size_t) input[7];
const size_t vx8 = (size_t) input[8];
const size_t vx9 = (size_t) input[9];
const size_t vx10 = (size_t) input[10];
const size_t vx11 = (size_t) input[11];
const size_t vx12 = (size_t) input[12];
const size_t vx13 = (size_t) input[13];
const size_t vx14 = (size_t) input[14];
const size_t vx15 = (size_t) input[15];
input += 16;
const uint32_t vt0 = (uint32_t) table[vx0];
const uint32_t vt1 = (uint32_t) table[vx1];
const uint32_t vt2 = (uint32_t) table[vx2];
const uint32_t vt3 = (uint32_t) table[vx3];
const uint32_t vt4 = (uint32_t) table[vx4];
const uint32_t vt5 = (uint32_t) table[vx5];
const uint32_t vt6 = (uint32_t) table[vx6];
const uint32_t vt7 = (uint32_t) table[vx7];
const uint32_t vt8 = (uint32_t) table[vx8];
const uint32_t vt9 = (uint32_t) table[vx9];
const uint32_t vt10 = (uint32_t) table[vx10];
const uint32_t vt11 = (uint32_t) table[vx11];
const uint32_t vt12 = (uint32_t) table[vx12];
const uint32_t vt13 = (uint32_t) table[vx13];
const uint32_t vt14 = (uint32_t) table[vx14];
const uint32_t vt15 = (uint32_t) table[vx15];
output[0] = (uint8_t) vt0;
output[1] = (uint8_t) vt1;
output[2] = (uint8_t) vt2;
output[3] = (uint8_t) vt3;
output[4] = (uint8_t) vt4;
output[5] = (uint8_t) vt5;
output[6] = (uint8_t) vt6;
output[7] = (uint8_t) vt7;
output[8] = (uint8_t) vt8;
output[9] = (uint8_t) vt9;
output[10] = (uint8_t) vt10;
output[11] = (uint8_t) vt11;
output[12] = (uint8_t) vt12;
output[13] = (uint8_t) vt13;
output[14] = (uint8_t) vt14;
output[15] = (uint8_t) vt15;
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const size_t vx = (size_t) *input++;
const uint32_t vt = (uint32_t) table[vx];
*output++ = (uint8_t) vt;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 2,936 | 31.633333 | 72 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__scalar_x2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
const size_t vx0 = (size_t) input[0];
const size_t vx1 = (size_t) input[1];
input += 2;
const uint32_t vt0 = (uint32_t) table[vx0];
const uint32_t vt1 = (uint32_t) table[vx1];
output[0] = (uint8_t) vt0;
output[1] = (uint8_t) vt1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const size_t vx = (size_t) *input;
const uint32_t vt = (uint32_t) table[vx];
*output = (uint8_t) vt;
}
}
| 1,124 | 24 | 72 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__scalar_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const size_t vx0 = (size_t) input[0];
const size_t vx1 = (size_t) input[1];
const size_t vx2 = (size_t) input[2];
const size_t vx3 = (size_t) input[3];
input += 4;
const uint32_t vt0 = (uint32_t) table[vx0];
const uint32_t vt1 = (uint32_t) table[vx1];
const uint32_t vt2 = (uint32_t) table[vx2];
const uint32_t vt3 = (uint32_t) table[vx3];
output[0] = (uint8_t) vt0;
output[1] = (uint8_t) vt1;
output[2] = (uint8_t) vt2;
output[3] = (uint8_t) vt3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const size_t vx = (size_t) *input++;
const uint32_t vt = (uint32_t) table[vx];
*output++ = (uint8_t) vt;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 1,443 | 25.740741 | 72 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__scalar_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const size_t vx0 = (size_t) input[0];
const size_t vx1 = (size_t) input[1];
const size_t vx2 = (size_t) input[2];
const size_t vx3 = (size_t) input[3];
const size_t vx4 = (size_t) input[4];
const size_t vx5 = (size_t) input[5];
const size_t vx6 = (size_t) input[6];
const size_t vx7 = (size_t) input[7];
input += 8;
const uint32_t vt0 = (uint32_t) table[vx0];
const uint32_t vt1 = (uint32_t) table[vx1];
const uint32_t vt2 = (uint32_t) table[vx2];
const uint32_t vt3 = (uint32_t) table[vx3];
const uint32_t vt4 = (uint32_t) table[vx4];
const uint32_t vt5 = (uint32_t) table[vx5];
const uint32_t vt6 = (uint32_t) table[vx6];
const uint32_t vt7 = (uint32_t) table[vx7];
output[0] = (uint8_t) vt0;
output[1] = (uint8_t) vt1;
output[2] = (uint8_t) vt2;
output[3] = (uint8_t) vt3;
output[4] = (uint8_t) vt4;
output[5] = (uint8_t) vt5;
output[6] = (uint8_t) vt6;
output[7] = (uint8_t) vt7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const size_t vx = (size_t) *input++;
const uint32_t vt = (uint32_t) table[vx];
*output++ = (uint8_t) vt;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 1,927 | 28.212121 | 72 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-ssse3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
#include <xnnpack/unaligned.h>
void xnn_x8_lut_ukernel__ssse3_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
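  // The table rows above are pre-XORed so that XOR-accumulating one PSHUFB
  // lookup per 16-entry row telescopes to table[x]: PSHUFB zeroes any lane
  // whose shifted index byte has gone negative, and the XOR chain cancels the
  // aliased lookups from indices that are still >= 16. Saturating subtraction
  // on the upper rows keeps indices that reached -128 from wrapping positive.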
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
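    // Store the remaining 1..15 bytes in halving chunks, shifting the bytes
    // already written out of the vector after each partial store.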
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}
| 6,691 | 39.804878 | 74 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-ssse3-x32.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
#include <xnnpack/unaligned.h>
void xnn_x8_lut_ukernel__ssse3_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vt0 = _mm_load_si128((const __m128i*) table);
const __m128i vt1 = _mm_load_si128((const __m128i*) (table + 16));
const __m128i vt2 = _mm_load_si128((const __m128i*) (table + 32));
const __m128i vt3 = _mm_load_si128((const __m128i*) (table + 48));
const __m128i vt4 = _mm_load_si128((const __m128i*) (table + 64));
const __m128i vt5 = _mm_load_si128((const __m128i*) (table + 80));
const __m128i vt6 = _mm_load_si128((const __m128i*) (table + 96));
const __m128i vt7 = _mm_load_si128((const __m128i*) (table + 112));
const __m128i vt8 = _mm_load_si128((const __m128i*) (table + 128));
const __m128i vt9 = _mm_load_si128((const __m128i*) (table + 144));
const __m128i vtA = _mm_load_si128((const __m128i*) (table + 160));
const __m128i vtB = _mm_load_si128((const __m128i*) (table + 176));
const __m128i vtC = _mm_load_si128((const __m128i*) (table + 192));
const __m128i vtD = _mm_load_si128((const __m128i*) (table + 208));
const __m128i vtE = _mm_load_si128((const __m128i*) (table + 224));
const __m128i vtF = _mm_load_si128((const __m128i*) (table + 240));
const __m128i vtable0 = vt0;
const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);
const __m128i voffset = _mm_set1_epi8(16);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input);
__m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
__m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
__m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
vx0 = _mm_sub_epi8(vx0, voffset);
vx1 = _mm_sub_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
vx0 = _mm_subs_epi8(vx0, voffset);
vx1 = _mm_subs_epi8(vx1, voffset);
vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
__m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vy = _mm_shuffle_epi8(vtable0, vx);
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
vx = _mm_sub_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
vx = _mm_subs_epi8(vx, voffset);
vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}
| 10,145 | 41.630252 | 74 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-wasmpshufb-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__wasmpshufb_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vt0 = wasm_v128_load(table);
const v128_t vt1 = wasm_v128_load(table + 16);
const v128_t vt2 = wasm_v128_load(table + 32);
const v128_t vt3 = wasm_v128_load(table + 48);
const v128_t vt4 = wasm_v128_load(table + 64);
const v128_t vt5 = wasm_v128_load(table + 80);
const v128_t vt6 = wasm_v128_load(table + 96);
const v128_t vt7 = wasm_v128_load(table + 112);
const v128_t vt8 = wasm_v128_load(table + 128);
const v128_t vt9 = wasm_v128_load(table + 144);
const v128_t vtA = wasm_v128_load(table + 160);
const v128_t vtB = wasm_v128_load(table + 176);
const v128_t vtC = wasm_v128_load(table + 192);
const v128_t vtD = wasm_v128_load(table + 208);
const v128_t vtE = wasm_v128_load(table + 224);
const v128_t vtF = wasm_v128_load(table + 240);
const v128_t vtable0 = vt0;
const v128_t vtable1 = wasm_v128_xor(vt0, vt1);
const v128_t vtable2 = wasm_v128_xor(vt1, vt2);
const v128_t vtable3 = wasm_v128_xor(vt2, vt3);
const v128_t vtable4 = wasm_v128_xor(vt3, vt4);
const v128_t vtable5 = wasm_v128_xor(vt4, vt5);
const v128_t vtable6 = wasm_v128_xor(vt5, vt6);
const v128_t vtable7 = wasm_v128_xor(vt6, vt7);
const v128_t vtable8 = wasm_v128_xor(wasm_v128_xor(vt7, vt8), vtable0);
const v128_t vtable9 = wasm_v128_xor(wasm_v128_xor(vt8, vt9), vtable1);
const v128_t vtableA = wasm_v128_xor(wasm_v128_xor(vt9, vtA), vtable2);
const v128_t vtableB = wasm_v128_xor(wasm_v128_xor(vtA, vtB), vtable3);
const v128_t vtableC = wasm_v128_xor(wasm_v128_xor(vtB, vtC), vtable4);
const v128_t vtableD = wasm_v128_xor(wasm_v128_xor(vtC, vtD), vtable5);
const v128_t vtableE = wasm_v128_xor(wasm_v128_xor(vtD, vtE), vtable6);
const v128_t vtableF = wasm_v128_xor(wasm_v128_xor(vtE, vtF), vtable7);
const v128_t voffset = wasm_i8x16_const_splat(16);
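  // Same XOR-telescoping lookup as the SSSE3 kernels; it depends on the
  // relaxed swizzle lowering to PSHUFB-style semantics, where a set bit 7
  // zeroes the lane and bits 3:0 select the byte otherwise.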
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load((const v128_t*) input);
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 6,996 | 42.191358 | 78 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-wasmpshufb-x32.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__wasmpshufb_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vt0 = wasm_v128_load(table);
const v128_t vt1 = wasm_v128_load(table + 16);
const v128_t vt2 = wasm_v128_load(table + 32);
const v128_t vt3 = wasm_v128_load(table + 48);
const v128_t vt4 = wasm_v128_load(table + 64);
const v128_t vt5 = wasm_v128_load(table + 80);
const v128_t vt6 = wasm_v128_load(table + 96);
const v128_t vt7 = wasm_v128_load(table + 112);
const v128_t vt8 = wasm_v128_load(table + 128);
const v128_t vt9 = wasm_v128_load(table + 144);
const v128_t vtA = wasm_v128_load(table + 160);
const v128_t vtB = wasm_v128_load(table + 176);
const v128_t vtC = wasm_v128_load(table + 192);
const v128_t vtD = wasm_v128_load(table + 208);
const v128_t vtE = wasm_v128_load(table + 224);
const v128_t vtF = wasm_v128_load(table + 240);
const v128_t vtable0 = vt0;
const v128_t vtable1 = wasm_v128_xor(vt0, vt1);
const v128_t vtable2 = wasm_v128_xor(vt1, vt2);
const v128_t vtable3 = wasm_v128_xor(vt2, vt3);
const v128_t vtable4 = wasm_v128_xor(vt3, vt4);
const v128_t vtable5 = wasm_v128_xor(vt4, vt5);
const v128_t vtable6 = wasm_v128_xor(vt5, vt6);
const v128_t vtable7 = wasm_v128_xor(vt6, vt7);
const v128_t vtable8 = wasm_v128_xor(wasm_v128_xor(vt7, vt8), vtable0);
const v128_t vtable9 = wasm_v128_xor(wasm_v128_xor(vt8, vt9), vtable1);
const v128_t vtableA = wasm_v128_xor(wasm_v128_xor(vt9, vtA), vtable2);
const v128_t vtableB = wasm_v128_xor(wasm_v128_xor(vtA, vtB), vtable3);
const v128_t vtableC = wasm_v128_xor(wasm_v128_xor(vtB, vtC), vtable4);
const v128_t vtableD = wasm_v128_xor(wasm_v128_xor(vtC, vtD), vtable5);
const v128_t vtableE = wasm_v128_xor(wasm_v128_xor(vtD, vtE), vtable6);
const v128_t vtableF = wasm_v128_xor(wasm_v128_xor(vtE, vtF), vtable7);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load((const v128_t*) input);
v128_t vx1 = wasm_v128_load((const v128_t*) (input + 16));
input += 32;
v128_t vy0 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx0);
v128_t vy1 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx1);
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx1));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx1));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load((const v128_t*) input);
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 11,158 | 46.283898 | 81 | c | XNNPACK | XNNPACK-master/src/x8-lut/gen/x8-lut-wasmpshufb-x48.c |
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__wasmpshufb_x48(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vt0 = wasm_v128_load(table);
const v128_t vt1 = wasm_v128_load(table + 16);
const v128_t vt2 = wasm_v128_load(table + 32);
const v128_t vt3 = wasm_v128_load(table + 48);
const v128_t vt4 = wasm_v128_load(table + 64);
const v128_t vt5 = wasm_v128_load(table + 80);
const v128_t vt6 = wasm_v128_load(table + 96);
const v128_t vt7 = wasm_v128_load(table + 112);
const v128_t vt8 = wasm_v128_load(table + 128);
const v128_t vt9 = wasm_v128_load(table + 144);
const v128_t vtA = wasm_v128_load(table + 160);
const v128_t vtB = wasm_v128_load(table + 176);
const v128_t vtC = wasm_v128_load(table + 192);
const v128_t vtD = wasm_v128_load(table + 208);
const v128_t vtE = wasm_v128_load(table + 224);
const v128_t vtF = wasm_v128_load(table + 240);
const v128_t vtable0 = vt0;
const v128_t vtable1 = wasm_v128_xor(vt0, vt1);
const v128_t vtable2 = wasm_v128_xor(vt1, vt2);
const v128_t vtable3 = wasm_v128_xor(vt2, vt3);
const v128_t vtable4 = wasm_v128_xor(vt3, vt4);
const v128_t vtable5 = wasm_v128_xor(vt4, vt5);
const v128_t vtable6 = wasm_v128_xor(vt5, vt6);
const v128_t vtable7 = wasm_v128_xor(vt6, vt7);
const v128_t vtable8 = wasm_v128_xor(wasm_v128_xor(vt7, vt8), vtable0);
const v128_t vtable9 = wasm_v128_xor(wasm_v128_xor(vt8, vt9), vtable1);
const v128_t vtableA = wasm_v128_xor(wasm_v128_xor(vt9, vtA), vtable2);
const v128_t vtableB = wasm_v128_xor(wasm_v128_xor(vtA, vtB), vtable3);
const v128_t vtableC = wasm_v128_xor(wasm_v128_xor(vtB, vtC), vtable4);
const v128_t vtableD = wasm_v128_xor(wasm_v128_xor(vtC, vtD), vtable5);
const v128_t vtableE = wasm_v128_xor(wasm_v128_xor(vtD, vtE), vtable6);
const v128_t vtableF = wasm_v128_xor(wasm_v128_xor(vtE, vtF), vtable7);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 48 * sizeof(uint8_t); batch -= 48 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load((const v128_t*) input);
v128_t vx1 = wasm_v128_load((const v128_t*) (input + 16));
v128_t vx2 = wasm_v128_load((const v128_t*) (input + 32));
input += 48;
v128_t vy0 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx0);
v128_t vy1 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx1);
v128_t vy2 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx2);
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx2));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx2));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
wasm_v128_store(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load((const v128_t*) input);
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
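The sixteen derived tables above (vtable0..vtableF) are pre-XORed so that XOR-accumulating pshufb-style lookups reconstructs table[x] for every byte x: for inputs below 128 the per-block lookups telescope directly, while inputs of 128 and above take a wrap-around path (the first wrapping wasm_i8x16_sub turns the negative byte positive again), which the extra vtable0..vtable7 terms folded into vtable8..vtableF cancel out; the switch to wasm_i8x16_sub_sat after table 8 pins exhausted indices at -128 so they keep selecting zero. The scalar model below is an illustrative sketch (not XNNPACK code) that verifies this reconstruction for all 256 inputs.

#include <assert.h>
#include <stdint.h>

// pshufb-style swizzle: a negative index byte selects 0, any other
// index selects lane (index & 15).
static uint8_t pshufb_lane(const uint8_t t[16], int8_t idx) {
  return idx < 0 ? 0 : t[idx & 15];
}

static uint8_t lut_xor_model(const uint8_t table[256], uint8_t x) {
  // Same table derivation as vtable0..vtableF above.
  uint8_t vtable[16][16];
  for (int lane = 0; lane < 16; lane++) {
    vtable[0][lane] = table[lane];
    for (int j = 1; j < 8; j++) {
      vtable[j][lane] = table[16 * (j - 1) + lane] ^ table[16 * j + lane];
    }
    for (int j = 8; j < 16; j++) {
      vtable[j][lane] =
          table[16 * (j - 1) + lane] ^ table[16 * j + lane] ^ vtable[j - 8][lane];
    }
  }
  int8_t vx = (int8_t) x;
  uint8_t vy = pshufb_lane(vtable[0], vx);
  for (int j = 1; j < 16; j++) {
    if (j <= 8) {
      vx = (int8_t) (vx - 16);  // wrapping byte subtract, like wasm_i8x16_sub
    } else {
      const int s = (int) vx - 16;  // saturating, like wasm_i8x16_sub_sat
      vx = (int8_t) (s < -128 ? -128 : s);
    }
    vy ^= pshufb_lane(vtable[j], vx);
  }
  return vy;
}

int main(void) {
  uint8_t table[256];
  for (int i = 0; i < 256; i++) {
    table[i] = (uint8_t) (i * 151 + 17);  // arbitrary test mapping
  }
  for (int x = 0; x < 256; x++) {
    assert(lut_xor_model(table, (uint8_t) x) == table[x]);
  }
  return 0;
}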
file_length: 13,187 | avg_line_length: 48.026022 | max_line_length: 81 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-wasmpshufb-x64.c
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmpshufb.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__wasmpshufb_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vt0 = wasm_v128_load(table);
const v128_t vt1 = wasm_v128_load(table + 16);
const v128_t vt2 = wasm_v128_load(table + 32);
const v128_t vt3 = wasm_v128_load(table + 48);
const v128_t vt4 = wasm_v128_load(table + 64);
const v128_t vt5 = wasm_v128_load(table + 80);
const v128_t vt6 = wasm_v128_load(table + 96);
const v128_t vt7 = wasm_v128_load(table + 112);
const v128_t vt8 = wasm_v128_load(table + 128);
const v128_t vt9 = wasm_v128_load(table + 144);
const v128_t vtA = wasm_v128_load(table + 160);
const v128_t vtB = wasm_v128_load(table + 176);
const v128_t vtC = wasm_v128_load(table + 192);
const v128_t vtD = wasm_v128_load(table + 208);
const v128_t vtE = wasm_v128_load(table + 224);
const v128_t vtF = wasm_v128_load(table + 240);
const v128_t vtable0 = vt0;
const v128_t vtable1 = wasm_v128_xor(vt0, vt1);
const v128_t vtable2 = wasm_v128_xor(vt1, vt2);
const v128_t vtable3 = wasm_v128_xor(vt2, vt3);
const v128_t vtable4 = wasm_v128_xor(vt3, vt4);
const v128_t vtable5 = wasm_v128_xor(vt4, vt5);
const v128_t vtable6 = wasm_v128_xor(vt5, vt6);
const v128_t vtable7 = wasm_v128_xor(vt6, vt7);
const v128_t vtable8 = wasm_v128_xor(wasm_v128_xor(vt7, vt8), vtable0);
const v128_t vtable9 = wasm_v128_xor(wasm_v128_xor(vt8, vt9), vtable1);
const v128_t vtableA = wasm_v128_xor(wasm_v128_xor(vt9, vtA), vtable2);
const v128_t vtableB = wasm_v128_xor(wasm_v128_xor(vtA, vtB), vtable3);
const v128_t vtableC = wasm_v128_xor(wasm_v128_xor(vtB, vtC), vtable4);
const v128_t vtableD = wasm_v128_xor(wasm_v128_xor(vtC, vtD), vtable5);
const v128_t vtableE = wasm_v128_xor(wasm_v128_xor(vtD, vtE), vtable6);
const v128_t vtableF = wasm_v128_xor(wasm_v128_xor(vtE, vtF), vtable7);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load((const v128_t*) input);
v128_t vx1 = wasm_v128_load((const v128_t*) (input + 16));
v128_t vx2 = wasm_v128_load((const v128_t*) (input + 32));
v128_t vx3 = wasm_v128_load((const v128_t*) (input + 48));
input += 64;
v128_t vy0 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx0);
v128_t vy1 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx1);
v128_t vy2 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx2);
v128_t vy3 = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx3);
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vx1 = wasm_i8x16_sub(vx1, voffset);
vx2 = wasm_i8x16_sub(vx2, voffset);
vx3 = wasm_i8x16_sub(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx3));
vx0 = wasm_i8x16_sub_sat(vx0, voffset);
vx1 = wasm_i8x16_sub_sat(vx1, voffset);
vx2 = wasm_i8x16_sub_sat(vx2, voffset);
vx3 = wasm_i8x16_sub_sat(vx3, voffset);
vy0 = wasm_v128_xor(vy0, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx0));
vy1 = wasm_v128_xor(vy1, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx1));
vy2 = wasm_v128_xor(vy2, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx2));
vy3 = wasm_v128_xor(vy3, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx3));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
wasm_v128_store(output + 32, vy2);
wasm_v128_store(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load((const v128_t*) input);
v128_t vy = __builtin_wasm_relaxed_swizzle_i8x16(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable8, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtable9, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableA, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableB, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableC, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableD, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableE, vx));
vx = wasm_i8x16_sub_sat(vx, voffset);
vy = wasm_v128_xor(vy, __builtin_wasm_relaxed_swizzle_i8x16(vtableF, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
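As a usage illustration (a hypothetical call site, not code from the repository; the names invert_bytes, src, dst and lut are invented), the kernel above can apply any 256-entry byte mapping. Note that the remainder path issues a full 16-byte vector load, so this sketch assumes the input buffer remains readable for at least 16 bytes from the last load address, as XNNPACK micro-kernels typically expect.

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/lut.h>

// Apply an arbitrary 256-entry byte mapping; here, bitwise NOT.
void invert_bytes(const uint8_t* src, uint8_t* dst, size_t n) {
  uint8_t lut[256];
  for (int i = 0; i < 256; i++) {
    lut[i] = (uint8_t) ~i;
  }
  if (n != 0) {  // the kernel asserts batch != 0
    xnn_x8_lut_ukernel__wasmpshufb_x64(n * sizeof(uint8_t), src, dst, lut);
  }
}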
file_length: 15,216 | avg_line_length: 49.387417 | max_line_length: 81 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-wasmsimd-x16.c
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__wasmsimd_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vtable0 = wasm_v128_load(table);
const v128_t vtable1 = wasm_v128_load(table + 16);
const v128_t vtable2 = wasm_v128_load(table + 32);
const v128_t vtable3 = wasm_v128_load(table + 48);
const v128_t vtable4 = wasm_v128_load(table + 64);
const v128_t vtable5 = wasm_v128_load(table + 80);
const v128_t vtable6 = wasm_v128_load(table + 96);
const v128_t vtable7 = wasm_v128_load(table + 112);
const v128_t vtable8 = wasm_v128_load(table + 128);
const v128_t vtable9 = wasm_v128_load(table + 144);
const v128_t vtable10 = wasm_v128_load(table + 160);
const v128_t vtable11 = wasm_v128_load(table + 176);
const v128_t vtable12 = wasm_v128_load(table + 192);
const v128_t vtable13 = wasm_v128_load(table + 208);
const v128_t vtable14 = wasm_v128_load(table + 224);
const v128_t vtable15 = wasm_v128_load(table + 240);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
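Unlike the relaxed-swizzle kernels above, this non-relaxed variant can simply OR the sixteen partial lookups together, because wasm_i8x16_swizzle returns 0 for any index outside [0, 15]: after j wrapping subtractions of 16, the index lands in range for exactly one of the sixteen sub-tables, and every out-of-range lookup contributes 0. A minimal scalar model of that scheme (an illustrative sketch, not XNNPACK code):

#include <assert.h>
#include <stdint.h>

// wasm_i8x16_swizzle semantics: any index outside [0, 15] yields 0.
static uint8_t swizzle_lane(const uint8_t t[16], uint8_t idx) {
  return idx < 16 ? t[idx] : 0;
}

static uint8_t lut_or_model(const uint8_t table[256], uint8_t x) {
  uint8_t vx = x;
  uint8_t vy = swizzle_lane(table, vx);        // vtable0
  for (int j = 1; j < 16; j++) {
    vx -= 16;                                  // wraps modulo 256
    vy |= swizzle_lane(table + 16 * j, vx);    // vtable1..vtable15
  }
  return vy;
}

int main(void) {
  uint8_t table[256];
  for (int i = 0; i < 256; i++) {
    table[i] = (uint8_t) (i * 151 + 17);  // arbitrary test mapping
  }
  for (int x = 0; x < 256; x++) {
    assert(lut_or_model(table, (uint8_t) x) == table[x]);
  }
  return 0;
}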
file_length: 5,461 | avg_line_length: 37.195804 | max_line_length: 72 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-wasmsimd-x32.c
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__wasmsimd_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vtable0 = wasm_v128_load(table);
const v128_t vtable1 = wasm_v128_load(table + 16);
const v128_t vtable2 = wasm_v128_load(table + 32);
const v128_t vtable3 = wasm_v128_load(table + 48);
const v128_t vtable4 = wasm_v128_load(table + 64);
const v128_t vtable5 = wasm_v128_load(table + 80);
const v128_t vtable6 = wasm_v128_load(table + 96);
const v128_t vtable7 = wasm_v128_load(table + 112);
const v128_t vtable8 = wasm_v128_load(table + 128);
const v128_t vtable9 = wasm_v128_load(table + 144);
const v128_t vtable10 = wasm_v128_load(table + 160);
const v128_t vtable11 = wasm_v128_load(table + 176);
const v128_t vtable12 = wasm_v128_load(table + 192);
const v128_t vtable13 = wasm_v128_load(table + 208);
const v128_t vtable14 = wasm_v128_load(table + 224);
const v128_t vtable15 = wasm_v128_load(table + 240);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
input += 32;
v128_t vy0 = wasm_i8x16_swizzle(vtable0, vx0);
v128_t vy1 = wasm_i8x16_swizzle(vtable0, vx1);
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable1, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable1, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable2, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable2, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable3, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable3, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable4, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable4, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable5, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable5, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable6, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable6, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable7, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable7, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable8, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable8, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable9, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable9, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable10, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable10, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable11, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable11, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable12, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable12, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable13, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable13, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable14, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable14, vx1));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable15, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable15, vx1));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
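The remainder block above decomposes the final 1..15 bytes in binary: each set bit of batch selects one lane store, and the vector is shifted down after each store so the next unwritten bytes occupy lane 0 (store64_lane + v64x2_shuffle, then store32_lane + u64x2_shr, and so on). A scalar sketch of the same store sequence (illustrative, with hypothetical names):

#include <stdint.h>
#include <string.h>

static void store_tail_model(uint8_t* output, const uint8_t vy_in[16], size_t batch) {
  uint8_t vy[16];
  memcpy(vy, vy_in, sizeof(vy));
  if (batch & 8) { memcpy(output, vy, 8); memmove(vy, vy + 8, 8); output += 8; }
  if (batch & 4) { memcpy(output, vy, 4); memmove(vy, vy + 4, 4); output += 4; }
  if (batch & 2) { memcpy(output, vy, 2); memmove(vy, vy + 2, 2); output += 2; }
  if (batch & 1) { *output = vy[0]; }
}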
file_length: 8,938 | avg_line_length: 40.384259 | max_line_length: 72 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-wasmsimd-x48.c
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__wasmsimd_x48(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vtable0 = wasm_v128_load(table);
const v128_t vtable1 = wasm_v128_load(table + 16);
const v128_t vtable2 = wasm_v128_load(table + 32);
const v128_t vtable3 = wasm_v128_load(table + 48);
const v128_t vtable4 = wasm_v128_load(table + 64);
const v128_t vtable5 = wasm_v128_load(table + 80);
const v128_t vtable6 = wasm_v128_load(table + 96);
const v128_t vtable7 = wasm_v128_load(table + 112);
const v128_t vtable8 = wasm_v128_load(table + 128);
const v128_t vtable9 = wasm_v128_load(table + 144);
const v128_t vtable10 = wasm_v128_load(table + 160);
const v128_t vtable11 = wasm_v128_load(table + 176);
const v128_t vtable12 = wasm_v128_load(table + 192);
const v128_t vtable13 = wasm_v128_load(table + 208);
const v128_t vtable14 = wasm_v128_load(table + 224);
const v128_t vtable15 = wasm_v128_load(table + 240);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 48 * sizeof(uint8_t); batch -= 48 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
v128_t vx2 = wasm_v128_load(input + 32);
input += 48;
v128_t vy0 = wasm_i8x16_swizzle(vtable0, vx0);
v128_t vy1 = wasm_i8x16_swizzle(vtable0, vx1);
v128_t vy2 = wasm_i8x16_swizzle(vtable0, vx2);
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable1, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable1, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable1, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable2, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable2, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable2, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable3, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable3, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable3, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable4, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable4, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable4, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable5, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable5, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable5, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable6, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable6, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable6, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable7, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable7, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable7, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable8, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable8, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable8, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable9, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable9, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable9, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable10, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable10, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable10, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable11, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable11, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable11, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable12, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable12, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable12, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable13, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable13, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable13, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable14, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable14, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable14, vx2));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable15, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable15, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable15, vx2));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
wasm_v128_store(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
file_length: 10,624 | avg_line_length: 41.670683 | max_line_length: 72 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-lut/gen/x8-lut-wasmsimd-x64.c
// Auto-generated file. Do not edit!
// Template: src/x8-lut/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__wasmsimd_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vtable0 = wasm_v128_load(table);
const v128_t vtable1 = wasm_v128_load(table + 16);
const v128_t vtable2 = wasm_v128_load(table + 32);
const v128_t vtable3 = wasm_v128_load(table + 48);
const v128_t vtable4 = wasm_v128_load(table + 64);
const v128_t vtable5 = wasm_v128_load(table + 80);
const v128_t vtable6 = wasm_v128_load(table + 96);
const v128_t vtable7 = wasm_v128_load(table + 112);
const v128_t vtable8 = wasm_v128_load(table + 128);
const v128_t vtable9 = wasm_v128_load(table + 144);
const v128_t vtable10 = wasm_v128_load(table + 160);
const v128_t vtable11 = wasm_v128_load(table + 176);
const v128_t vtable12 = wasm_v128_load(table + 192);
const v128_t vtable13 = wasm_v128_load(table + 208);
const v128_t vtable14 = wasm_v128_load(table + 224);
const v128_t vtable15 = wasm_v128_load(table + 240);
const v128_t voffset = wasm_i8x16_const_splat(16);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
v128_t vx2 = wasm_v128_load(input + 32);
v128_t vx3 = wasm_v128_load(input + 48);
input += 64;
v128_t vy0 = wasm_i8x16_swizzle(vtable0, vx0);
v128_t vy1 = wasm_i8x16_swizzle(vtable0, vx1);
v128_t vy2 = wasm_i8x16_swizzle(vtable0, vx2);
v128_t vy3 = wasm_i8x16_swizzle(vtable0, vx3);
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable1, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable1, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable1, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable1, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable2, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable2, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable2, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable2, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable3, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable3, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable3, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable3, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable4, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable4, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable4, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable4, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable5, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable5, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable5, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable5, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable6, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable6, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable6, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable6, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable7, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable7, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable7, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable7, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable8, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable8, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable8, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable8, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable9, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable9, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable9, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable9, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable10, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable10, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable10, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable10, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable11, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable11, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable11, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable11, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable12, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable12, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable12, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable12, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable13, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable13, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable13, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable13, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable14, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable14, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable14, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable14, vx3));
vx0 = wasm_i8x16_sub(vx0, voffset);
vy0 = wasm_v128_or(vy0, wasm_i8x16_swizzle(vtable15, vx0));
vx1 = wasm_i8x16_sub(vx1, voffset);
vy1 = wasm_v128_or(vy1, wasm_i8x16_swizzle(vtable15, vx1));
vx2 = wasm_i8x16_sub(vx2, voffset);
vy2 = wasm_v128_or(vy2, wasm_i8x16_swizzle(vtable15, vx2));
vx3 = wasm_i8x16_sub(vx3, voffset);
vy3 = wasm_v128_or(vy3, wasm_i8x16_swizzle(vtable15, vx3));
wasm_v128_store(output, vy0);
wasm_v128_store(output + 16, vy1);
wasm_v128_store(output + 32, vy2);
wasm_v128_store(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx = wasm_v128_load(input);
input += 16;
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
wasm_v128_store(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_i8x16_swizzle(vtable0, vx);
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
vx = wasm_i8x16_sub(vx, voffset);
vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));
if (batch & (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
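Worked example of the loop accounting above: with batch = 100 bytes, the x64 kernel runs the 64-byte main loop once (100 -> 36), the 16-byte loop twice (36 -> 4), and finishes in the remainder block, where only batch & 4 is set, so a single wasm_v128_store32_lane writes the last 4 bytes.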
file_length: 12,310 | avg_line_length: 42.656028 | max_line_length: 72 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x8-packw/gen/x8-packw-x16-gemm-goi-scalar-int-x4.c
// Auto-generated file. Do not edit!
// Template: src/x8-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x8_packw_gemm_goi_ukernel_x16__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 16); // This kernel is for NR=16
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
int8_t* out = (int8_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
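// Packed layout produced below, per tile of 16 output channels:
//   16 uint32 bias words (zero-filled when bias == NULL), followed by
//   kc rows of 16 channel-interleaved int8 weights (the 4x-unrolled KC
//   loop emits 64 bytes per iteration), followed by extra_bytes of
//   caller-reserved padding before the next tile.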
do {
// NC main loop multiple of 16
const int8_t* w0 = (const int8_t*) weights;
size_t n = nc;
for (; n >= 16; n -= 16) {
if XNN_LIKELY(b != NULL) {
((uint32_t*) out)[0] = b[0];
((uint32_t*) out)[1] = b[1];
((uint32_t*) out)[2] = b[2];
((uint32_t*) out)[3] = b[3];
((uint32_t*) out)[4] = b[4];
((uint32_t*) out)[5] = b[5];
((uint32_t*) out)[6] = b[6];
((uint32_t*) out)[7] = b[7];
((uint32_t*) out)[8] = b[8];
((uint32_t*) out)[9] = b[9];
((uint32_t*) out)[10] = b[10];
((uint32_t*) out)[11] = b[11];
((uint32_t*) out)[12] = b[12];
((uint32_t*) out)[13] = b[13];
((uint32_t*) out)[14] = b[14];
((uint32_t*) out)[15] = b[15];
b += 16;
} else {
((uint32_t*) out)[0] = 0;
((uint32_t*) out)[1] = 0;
((uint32_t*) out)[2] = 0;
((uint32_t*) out)[3] = 0;
((uint32_t*) out)[4] = 0;
((uint32_t*) out)[5] = 0;
((uint32_t*) out)[6] = 0;
((uint32_t*) out)[7] = 0;
((uint32_t*) out)[8] = 0;
((uint32_t*) out)[9] = 0;
((uint32_t*) out)[10] = 0;
((uint32_t*) out)[11] = 0;
((uint32_t*) out)[12] = 0;
((uint32_t*) out)[13] = 0;
((uint32_t*) out)[14] = 0;
((uint32_t*) out)[15] = 0;
}
out += 16 * sizeof(uint32_t);
const int8_t* w1 = w0 + kc;
const int8_t* w2 = w1 + kc;
const int8_t* w3 = w2 + kc;
const int8_t* w4 = w3 + kc;
const int8_t* w5 = w4 + kc;
const int8_t* w6 = w5 + kc;
const int8_t* w7 = w6 + kc;
const int8_t* w8 = w7 + kc;
const int8_t* w9 = w8 + kc;
const int8_t* w10 = w9 + kc;
const int8_t* w11 = w10 + kc;
const int8_t* w12 = w11 + kc;
const int8_t* w13 = w12 + kc;
const int8_t* w14 = w13 + kc;
const int8_t* w15 = w14 + kc;
// KC main loop multiple of 16x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
const int8_t v30 = w3[0];
const int8_t v31 = w3[1];
const int8_t v32 = w3[2];
const int8_t v33 = w3[3];
w3 += 4;
const int8_t v40 = w4[0];
const int8_t v41 = w4[1];
const int8_t v42 = w4[2];
const int8_t v43 = w4[3];
w4 += 4;
const int8_t v50 = w5[0];
const int8_t v51 = w5[1];
const int8_t v52 = w5[2];
const int8_t v53 = w5[3];
w5 += 4;
const int8_t v60 = w6[0];
const int8_t v61 = w6[1];
const int8_t v62 = w6[2];
const int8_t v63 = w6[3];
w6 += 4;
const int8_t v70 = w7[0];
const int8_t v71 = w7[1];
const int8_t v72 = w7[2];
const int8_t v73 = w7[3];
w7 += 4;
const int8_t v80 = w8[0];
const int8_t v81 = w8[1];
const int8_t v82 = w8[2];
const int8_t v83 = w8[3];
w8 += 4;
const int8_t v90 = w9[0];
const int8_t v91 = w9[1];
const int8_t v92 = w9[2];
const int8_t v93 = w9[3];
w9 += 4;
const int8_t v100 = w10[0];
const int8_t v101 = w10[1];
const int8_t v102 = w10[2];
const int8_t v103 = w10[3];
w10 += 4;
const int8_t v110 = w11[0];
const int8_t v111 = w11[1];
const int8_t v112 = w11[2];
const int8_t v113 = w11[3];
w11 += 4;
const int8_t v120 = w12[0];
const int8_t v121 = w12[1];
const int8_t v122 = w12[2];
const int8_t v123 = w12[3];
w12 += 4;
const int8_t v130 = w13[0];
const int8_t v131 = w13[1];
const int8_t v132 = w13[2];
const int8_t v133 = w13[3];
w13 += 4;
const int8_t v140 = w14[0];
const int8_t v141 = w14[1];
const int8_t v142 = w14[2];
const int8_t v143 = w14[3];
w14 += 4;
const int8_t v150 = w15[0];
const int8_t v151 = w15[1];
const int8_t v152 = w15[2];
const int8_t v153 = w15[3];
w15 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[15] = v150;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[31] = v151;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[47] = v152;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out[63] = v153;
out += 64;
}
// KC remainder
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
const int8_t v3 = *w3++;
out[3] = v3;
const int8_t v4 = *w4++;
out[4] = v4;
const int8_t v5 = *w5++;
out[5] = v5;
const int8_t v6 = *w6++;
out[6] = v6;
const int8_t v7 = *w7++;
out[7] = v7;
const int8_t v8 = *w8++;
out[8] = v8;
const int8_t v9 = *w9++;
out[9] = v9;
const int8_t v10 = *w10++;
out[10] = v10;
const int8_t v11 = *w11++;
out[11] = v11;
const int8_t v12 = *w12++;
out[12] = v12;
const int8_t v13 = *w13++;
out[13] = v13;
const int8_t v14 = *w14++;
out[14] = v14;
const int8_t v15 = *w15++;
out[15] = v15;
out += 16;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
w0 = w15;
}
// NC remainder (1..15)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*((uint32_t*) out) = *b++;
out += sizeof(uint32_t);
} while (--nb != 0);
} else {
size_t nb = n;
do {
*((uint32_t*) out) = 0;
out += sizeof(uint32_t);
} while (--nb != 0);
}
out += (16 - n) * sizeof(uint32_t);
// NR remainder has fewer than 16 rows, so the last row is never loaded
const int8_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const int8_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const int8_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const int8_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const int8_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const int8_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const int8_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const int8_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const int8_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const int8_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
const int8_t* w11 = w10 + kc;
if XNN_UNPREDICTABLE(n < 12) {
w11 = w10;
}
const int8_t* w12 = w11 + kc;
if XNN_UNPREDICTABLE(n <= 12) {
w12 = w11;
}
const int8_t* w13 = w12 + kc;
if XNN_UNPREDICTABLE(n < 14) {
w13 = w12;
}
const int8_t* w14 = w13 + kc;
if XNN_UNPREDICTABLE(n <= 14) {
w14 = w13;
}
// KC main loop multiple of 16x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
const int8_t v30 = w3[0];
const int8_t v31 = w3[1];
const int8_t v32 = w3[2];
const int8_t v33 = w3[3];
w3 += 4;
const int8_t v40 = w4[0];
const int8_t v41 = w4[1];
const int8_t v42 = w4[2];
const int8_t v43 = w4[3];
w4 += 4;
const int8_t v50 = w5[0];
const int8_t v51 = w5[1];
const int8_t v52 = w5[2];
const int8_t v53 = w5[3];
w5 += 4;
const int8_t v60 = w6[0];
const int8_t v61 = w6[1];
const int8_t v62 = w6[2];
const int8_t v63 = w6[3];
w6 += 4;
const int8_t v70 = w7[0];
const int8_t v71 = w7[1];
const int8_t v72 = w7[2];
const int8_t v73 = w7[3];
w7 += 4;
const int8_t v80 = w8[0];
const int8_t v81 = w8[1];
const int8_t v82 = w8[2];
const int8_t v83 = w8[3];
w8 += 4;
const int8_t v90 = w9[0];
const int8_t v91 = w9[1];
const int8_t v92 = w9[2];
const int8_t v93 = w9[3];
w9 += 4;
const int8_t v100 = w10[0];
const int8_t v101 = w10[1];
const int8_t v102 = w10[2];
const int8_t v103 = w10[3];
w10 += 4;
const int8_t v110 = w11[0];
const int8_t v111 = w11[1];
const int8_t v112 = w11[2];
const int8_t v113 = w11[3];
w11 += 4;
const int8_t v120 = w12[0];
const int8_t v121 = w12[1];
const int8_t v122 = w12[2];
const int8_t v123 = w12[3];
w12 += 4;
const int8_t v130 = w13[0];
const int8_t v131 = w13[1];
const int8_t v132 = w13[2];
const int8_t v133 = w13[3];
w13 += 4;
const int8_t v140 = w14[0];
const int8_t v141 = w14[1];
const int8_t v142 = w14[2];
const int8_t v143 = w14[3];
w14 += 4;
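        // Lane 15 of each group of 16 outputs (out[15], out[31], out[47],
        // out[63]) belongs to the absent 16th row and is left unwritten here.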
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out += 64;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
const int8_t v3 = *w3++;
out[3] = v3;
const int8_t v4 = *w4++;
out[4] = v4;
const int8_t v5 = *w5++;
out[5] = v5;
const int8_t v6 = *w6++;
out[6] = v6;
const int8_t v7 = *w7++;
out[7] = v7;
const int8_t v8 = *w8++;
out[8] = v8;
const int8_t v9 = *w9++;
out[9] = v9;
const int8_t v10 = *w10++;
out[10] = v10;
const int8_t v11 = *w11++;
out[11] = v11;
const int8_t v12 = *w12++;
out[12] = v12;
const int8_t v13 = *w13++;
out[13] = v13;
const int8_t v14 = *w14++;
out[14] = v14;
out += 16;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
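All of the GOI packing variants above share one tile layout: for each group and each tile of nr output channels, nr 32-bit bias slots (copied from bias, or zeroed when bias is NULL; slots past nc are skipped, not written), then kc columns of nr interleaved int8 weights, then extra_bytes of caller-reserved space. Below is a minimal sketch of the buffer size this implies; packed_goi_bytes is a hypothetical helper, not an XNNPACK API.

#include <stddef.h>
#include <stdint.h>

// Hypothetical helper (not an XNNPACK API): number of bytes the x8 GOI
// packing kernels above advance through for given g, nc, kc, nr, extra_bytes.
static size_t packed_goi_bytes(size_t g, size_t nc, size_t kc,
                               size_t nr, size_t extra_bytes)
{
  const size_t tiles = (nc + nr - 1) / nr;            // NC tiles, incl. remainder
  const size_t tile_bytes = nr * sizeof(uint32_t)     // bias slots
                          + nr * kc * sizeof(int8_t)  // interleaved weights
                          + extra_bytes;              // caller-reserved space
  return g * tiles * tile_bytes;
}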
| 14,747 | 25.863388 | 72 | c |
XNNPACK | XNNPACK-master/src/x8-packw/gen/x8-packw-x2-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x8-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x8_packw_gemm_goi_ukernel_x2__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2); // This kernel is for NR=2
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
int8_t* out = (int8_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 2
const int8_t* w0 = (const int8_t*) weights;
size_t n = nc;
for (;n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
((uint32_t*) out)[0] = b[0];
((uint32_t*) out)[1] = b[1];
b += 2;
} else {
((uint32_t*) out)[0] = 0;
((uint32_t*) out)[1] = 0;
}
out += 2 * sizeof(uint32_t);
const int8_t* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v01;
out[3] = v11;
out[4] = v02;
out[5] = v12;
out[6] = v03;
out[7] = v13;
out += 8;
}
// KC remainder
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
out += 2;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*((uint32_t*) out) = *b++;
out += sizeof(uint32_t);
} while (--nb != 0);
} else {
size_t nb = n;
do {
*((uint32_t*) out) = 0;
out += sizeof(uint32_t);
} while (--nb != 0);
}
out += (2 - n) * sizeof(uint32_t);
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
out[0] = v00;
out[2] = v01;
out[4] = v02;
out[6] = v03;
out += 8;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
out += 2;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
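As a concrete (hedged) usage sketch, here is the NR=2 kernel above packing a single 3x5 weight matrix. It assumes the declaration from <xnnpack/packw.h> and that passing params = NULL is acceptable, since this variant never reads it.

#include <stdint.h>
#include <stdio.h>
#include <xnnpack/packw.h>  // declares the ukernel above

int main(void)
{
  enum { G = 1, NC = 3, KC = 5, NR = 2 };
  int8_t weights[G * NC * KC];
  const uint32_t bias[NC] = {1, 2, 3};
  for (int i = 0; i < G * NC * KC; i++) weights[i] = (int8_t) i;
  // ceil(NC/NR) = 2 tiles, each NR * (sizeof(uint32_t) + KC) bytes.
  int8_t packed[2 * NR * (sizeof(uint32_t) + KC)];
  xnn_x8_packw_gemm_goi_ukernel_x2__scalar_int_x4(
      G, NC, KC, NR, /*kr=*/1, /*sr=*/1,
      weights, bias, packed, /*extra_bytes=*/0, /*params=*/NULL);
  // The first weight of tile 0 sits right after the NR bias words.
  printf("packed[8] = %d\n", packed[NR * sizeof(uint32_t)]);  // prints 0
  return 0;
}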
| 3,249 | 21.887324 | 72 | c |
XNNPACK | XNNPACK-master/src/x8-packw/gen/x8-packw-x4-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x8-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x8_packw_gemm_goi_ukernel_x4__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 4); // This kernel is for NR=4
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
int8_t* out = (int8_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 4
const int8_t* w0 = (const int8_t*) weights;
size_t n = nc;
for (;n >= 4; n -= 4) {
if XNN_LIKELY(b != NULL) {
((uint32_t*) out)[0] = b[0];
((uint32_t*) out)[1] = b[1];
((uint32_t*) out)[2] = b[2];
((uint32_t*) out)[3] = b[3];
b += 4;
} else {
((uint32_t*) out)[0] = 0;
((uint32_t*) out)[1] = 0;
((uint32_t*) out)[2] = 0;
((uint32_t*) out)[3] = 0;
}
out += 4 * sizeof(uint32_t);
const int8_t* w1 = w0 + kc;
const int8_t* w2 = w1 + kc;
const int8_t* w3 = w2 + kc;
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
const int8_t v30 = w3[0];
const int8_t v31 = w3[1];
const int8_t v32 = w3[2];
const int8_t v33 = w3[3];
w3 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[7] = v31;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[11] = v32;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out[15] = v33;
out += 16;
}
// KC remainder
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
const int8_t v3 = *w3++;
out[3] = v3;
out += 4;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
w0 = w3;
}
// NC remainder (1..3)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*((uint32_t*) out) = *b++;
out += sizeof(uint32_t);
} while (--nb != 0);
} else {
size_t nb = n;
do {
*((uint32_t*) out) = 0;
out += sizeof(uint32_t);
} while (--nb != 0);
}
out += (4 - n) * sizeof(uint32_t);
      // NR remainder has fewer than 4 rows, so the last row is not loaded
const int8_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const int8_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out += 16;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
out += 4;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
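For unit tests, a naive packer that reproduces the same layout can serve as a reference for any of these NR variants. The sketch below follows the layout assumptions noted earlier and is not XNNPACK's own reference code; unlike the kernels, it zero-fills the padding lanes that they leave unwritten.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Reference GOI packer (sketch only). Per tile of nr output channels:
// nr bias words, then kc columns of nr interleaved int8 weights, then
// extra_bytes of reserved space.
static void pack_goi_reference(size_t g, size_t nc, size_t kc, size_t nr,
                               const int8_t* weights, const uint32_t* bias,
                               int8_t* out, size_t extra_bytes)
{
  for (size_t gi = 0; gi < g; gi++) {
    const int8_t* w = weights + gi * nc * kc;
    const uint32_t* b = (bias != NULL) ? bias + gi * nc : NULL;
    for (size_t n0 = 0; n0 < nc; n0 += nr) {
      const size_t valid = (nc - n0 < nr) ? nc - n0 : nr;
      for (size_t i = 0; i < nr; i++) {
        const uint32_t v = (b != NULL && i < valid) ? b[n0 + i] : 0;
        memcpy(out, &v, sizeof(v));  // bias slot i of this tile
        out += sizeof(v);
      }
      for (size_t k = 0; k < kc; k++) {
        for (size_t i = 0; i < nr; i++) {
          // Padding lanes (i >= valid) are zeroed here; the kernels
          // above simply skip them.
          *out++ = (i < valid) ? w[(n0 + i) * kc + k] : 0;
        }
      }
      out += extra_bytes;  // caller-reserved space per tile
    }
  }
}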
| 4,911 | 23.437811 | 72 | c |
XNNPACK | XNNPACK-master/src/x8-packw/gen/x8-packw-x8-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x8-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x8_packw_gemm_goi_ukernel_x8__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8); // This kernel is for NR=8
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
int8_t* out = (int8_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 8
const int8_t* w0 = (const int8_t*) weights;
size_t n = nc;
for (;n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
((uint32_t*) out)[0] = b[0];
((uint32_t*) out)[1] = b[1];
((uint32_t*) out)[2] = b[2];
((uint32_t*) out)[3] = b[3];
((uint32_t*) out)[4] = b[4];
((uint32_t*) out)[5] = b[5];
((uint32_t*) out)[6] = b[6];
((uint32_t*) out)[7] = b[7];
b += 8;
} else {
((uint32_t*) out)[0] = 0;
((uint32_t*) out)[1] = 0;
((uint32_t*) out)[2] = 0;
((uint32_t*) out)[3] = 0;
((uint32_t*) out)[4] = 0;
((uint32_t*) out)[5] = 0;
((uint32_t*) out)[6] = 0;
((uint32_t*) out)[7] = 0;
}
out += 8 * sizeof(uint32_t);
const int8_t* w1 = w0 + kc;
const int8_t* w2 = w1 + kc;
const int8_t* w3 = w2 + kc;
const int8_t* w4 = w3 + kc;
const int8_t* w5 = w4 + kc;
const int8_t* w6 = w5 + kc;
const int8_t* w7 = w6 + kc;
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
const int8_t v30 = w3[0];
const int8_t v31 = w3[1];
const int8_t v32 = w3[2];
const int8_t v33 = w3[3];
w3 += 4;
const int8_t v40 = w4[0];
const int8_t v41 = w4[1];
const int8_t v42 = w4[2];
const int8_t v43 = w4[3];
w4 += 4;
const int8_t v50 = w5[0];
const int8_t v51 = w5[1];
const int8_t v52 = w5[2];
const int8_t v53 = w5[3];
w5 += 4;
const int8_t v60 = w6[0];
const int8_t v61 = w6[1];
const int8_t v62 = w6[2];
const int8_t v63 = w6[3];
w6 += 4;
const int8_t v70 = w7[0];
const int8_t v71 = w7[1];
const int8_t v72 = w7[2];
const int8_t v73 = w7[3];
w7 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[15] = v71;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[23] = v72;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out[31] = v73;
out += 32;
}
// KC remainder
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
const int8_t v3 = *w3++;
out[3] = v3;
const int8_t v4 = *w4++;
out[4] = v4;
const int8_t v5 = *w5++;
out[5] = v5;
const int8_t v6 = *w6++;
out[6] = v6;
const int8_t v7 = *w7++;
out[7] = v7;
out += 8;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*((uint32_t*) out) = *b++;
out += sizeof(uint32_t);
} while (--nb != 0);
} else {
size_t nb = n;
do {
*((uint32_t*) out) = 0;
out += sizeof(uint32_t);
} while (--nb != 0);
}
out += (8 - n) * sizeof(uint32_t);
      // NR remainder has fewer than 8 rows, so the last row is not loaded
const int8_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const int8_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const int8_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const int8_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const int8_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const int8_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const int8_t v00 = w0[0];
const int8_t v01 = w0[1];
const int8_t v02 = w0[2];
const int8_t v03 = w0[3];
w0 += 4;
const int8_t v10 = w1[0];
const int8_t v11 = w1[1];
const int8_t v12 = w1[2];
const int8_t v13 = w1[3];
w1 += 4;
const int8_t v20 = w2[0];
const int8_t v21 = w2[1];
const int8_t v22 = w2[2];
const int8_t v23 = w2[3];
w2 += 4;
const int8_t v30 = w3[0];
const int8_t v31 = w3[1];
const int8_t v32 = w3[2];
const int8_t v33 = w3[3];
w3 += 4;
const int8_t v40 = w4[0];
const int8_t v41 = w4[1];
const int8_t v42 = w4[2];
const int8_t v43 = w4[3];
w4 += 4;
const int8_t v50 = w5[0];
const int8_t v51 = w5[1];
const int8_t v52 = w5[2];
const int8_t v53 = w5[3];
w5 += 4;
const int8_t v60 = w6[0];
const int8_t v61 = w6[1];
const int8_t v62 = w6[2];
const int8_t v63 = w6[3];
w6 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out += 32;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const int8_t v0 = *w0++;
out[0] = v0;
const int8_t v1 = *w1++;
out[1] = v1;
const int8_t v2 = *w2++;
out[2] = v2;
const int8_t v3 = *w3++;
out[3] = v3;
const int8_t v4 = *w4++;
out[4] = v4;
const int8_t v5 = *w5++;
out[5] = v5;
const int8_t v6 = *w6++;
out[6] = v6;
out += 8;
}
out = (int8_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 8,104 | 24.567823 | 72 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-16x16-reuse-switch-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__16x16_reuse_switch_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 16;
const size_t tile_width = 16;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t);
const uint8_t* i0 = input;
uint8_t* o = (uint8_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 15);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 16; bh -= 16) {
const uint8x16_t v4_0 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_1 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_2 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_3 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_4 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_5 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_6 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_7 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_8 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_9 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_10 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_11 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_12 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_13 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_14 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x16_t v4_15 = vld1q_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
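      // Four rounds of vzipq_u8 interleavings transpose the 16x16 byte tile;
      // v0_0..v0_7 then hold the 16 transposed rows (two per .val pair),
      // which the switch below stores from column rem down to column 0.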
const uint8x16x2_t v3_0 = vzipq_u8(v4_0, v4_8);
const uint8x16x2_t v3_1 = vzipq_u8(v4_1, v4_9);
const uint8x16x2_t v3_2 = vzipq_u8(v4_2, v4_10);
const uint8x16x2_t v3_3 = vzipq_u8(v4_3, v4_11);
const uint8x16x2_t v3_4 = vzipq_u8(v4_4, v4_12);
const uint8x16x2_t v3_5 = vzipq_u8(v4_5, v4_13);
const uint8x16x2_t v3_6 = vzipq_u8(v4_6, v4_14);
const uint8x16x2_t v3_7 = vzipq_u8(v4_7, v4_15);
const uint8x16x2_t v2_0 = vzipq_u8(v3_0.val[0], v3_4.val[0]);
const uint8x16x2_t v2_1 = vzipq_u8(v3_0.val[1], v3_4.val[1]);
const uint8x16x2_t v2_2 = vzipq_u8(v3_1.val[0], v3_5.val[0]);
const uint8x16x2_t v2_3 = vzipq_u8(v3_1.val[1], v3_5.val[1]);
const uint8x16x2_t v2_4 = vzipq_u8(v3_2.val[0], v3_6.val[0]);
const uint8x16x2_t v2_5 = vzipq_u8(v3_2.val[1], v3_6.val[1]);
const uint8x16x2_t v2_6 = vzipq_u8(v3_3.val[0], v3_7.val[0]);
const uint8x16x2_t v2_7 = vzipq_u8(v3_3.val[1], v3_7.val[1]);
const uint8x16x2_t v1_0 = vzipq_u8(v2_0.val[0], v2_4.val[0]);
const uint8x16x2_t v1_1 = vzipq_u8(v2_0.val[1], v2_4.val[1]);
const uint8x16x2_t v1_2 = vzipq_u8(v2_1.val[0], v2_5.val[0]);
const uint8x16x2_t v1_3 = vzipq_u8(v2_1.val[1], v2_5.val[1]);
const uint8x16x2_t v1_4 = vzipq_u8(v2_2.val[0], v2_6.val[0]);
const uint8x16x2_t v1_5 = vzipq_u8(v2_2.val[1], v2_6.val[1]);
const uint8x16x2_t v1_6 = vzipq_u8(v2_3.val[0], v2_7.val[0]);
const uint8x16x2_t v1_7 = vzipq_u8(v2_3.val[1], v2_7.val[1]);
const uint8x16x2_t v0_0 = vzipq_u8(v1_0.val[0], v1_4.val[0]);
const uint8x16x2_t v0_1 = vzipq_u8(v1_0.val[1], v1_4.val[1]);
const uint8x16x2_t v0_2 = vzipq_u8(v1_1.val[0], v1_5.val[0]);
const uint8x16x2_t v0_3 = vzipq_u8(v1_1.val[1], v1_5.val[1]);
const uint8x16x2_t v0_4 = vzipq_u8(v1_2.val[0], v1_6.val[0]);
const uint8x16x2_t v0_5 = vzipq_u8(v1_2.val[1], v1_6.val[1]);
const uint8x16x2_t v0_6 = vzipq_u8(v1_3.val[0], v1_7.val[0]);
const uint8x16x2_t v0_7 = vzipq_u8(v1_3.val[1], v1_7.val[1]);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 15:
vst1q_u8(oN, v0_7.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 14:
vst1q_u8(oN, v0_7.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 13:
vst1q_u8(oN, v0_6.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 12:
vst1q_u8(oN, v0_6.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 11:
vst1q_u8(oN, v0_5.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 10:
vst1q_u8(oN, v0_5.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 9:
vst1q_u8(oN, v0_4.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 8:
vst1q_u8(oN, v0_4.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 7:
vst1q_u8(oN, v0_3.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1q_u8(oN, v0_3.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1q_u8(oN, v0_2.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1q_u8(oN, v0_2.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1q_u8(oN, v0_1.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1q_u8(oN, v0_1.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1q_u8(oN, v0_0.val[1]);
case 0:
vst1q_u8(o, v0_0.val[0]); o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint8x16_t v4_0 = vld1q_u8(i0);
const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x16_t v4_1 = vld1q_u8(i1);
const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint8x16_t v4_2 = vld1q_u8(i2);
const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint8x16_t v4_3 = vld1q_u8(i3);
const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint8x16_t v4_4 = vld1q_u8(i4);
const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint8x16_t v4_5 = vld1q_u8(i5);
const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint8x16_t v4_6 = vld1q_u8(i6);
const uint8_t *i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
if XNN_UNPREDICTABLE(bh < 8) {
i7 = i6;
}
const uint8x16_t v4_7 = vld1q_u8(i7);
const uint8_t *i8 = (const uint8_t*) ((uintptr_t) i7 + input_stride);
if XNN_UNPREDICTABLE(bh <= 8) {
i8 = i7;
}
const uint8x16_t v4_8 = vld1q_u8(i8);
const uint8_t *i9 = (const uint8_t*) ((uintptr_t) i8 + input_stride);
if XNN_UNPREDICTABLE(bh < 10) {
i9 = i8;
}
const uint8x16_t v4_9 = vld1q_u8(i9);
const uint8_t *i10 = (const uint8_t*) ((uintptr_t) i9 + input_stride);
if XNN_UNPREDICTABLE(bh <= 10) {
i10 = i9;
}
const uint8x16_t v4_10 = vld1q_u8(i10);
const uint8_t *i11 = (const uint8_t*) ((uintptr_t) i10 + input_stride);
if XNN_UNPREDICTABLE(bh < 12) {
i11 = i10;
}
const uint8x16_t v4_11 = vld1q_u8(i11);
const uint8_t *i12 = (const uint8_t*) ((uintptr_t) i11 + input_stride);
if XNN_UNPREDICTABLE(bh <= 12) {
i12 = i11;
}
const uint8x16_t v4_12 = vld1q_u8(i12);
const uint8_t *i13 = (const uint8_t*) ((uintptr_t) i12 + input_stride);
if XNN_UNPREDICTABLE(bh < 14) {
i13 = i12;
}
const uint8x16_t v4_13 = vld1q_u8(i13);
const uint8_t *i14 = (const uint8_t*) ((uintptr_t) i13 + input_stride);
if XNN_UNPREDICTABLE(bh <= 14) {
i14 = i13;
}
const uint8x16_t v4_14 = vld1q_u8(i14);
const uint8x16_t v4_15 = vmovq_n_u8(0);
const uint8x16x2_t v3_0 = vzipq_u8(v4_0, v4_8);
const uint8x16x2_t v3_1 = vzipq_u8(v4_1, v4_9);
const uint8x16x2_t v3_2 = vzipq_u8(v4_2, v4_10);
const uint8x16x2_t v3_3 = vzipq_u8(v4_3, v4_11);
const uint8x16x2_t v3_4 = vzipq_u8(v4_4, v4_12);
const uint8x16x2_t v3_5 = vzipq_u8(v4_5, v4_13);
const uint8x16x2_t v3_6 = vzipq_u8(v4_6, v4_14);
const uint8x16x2_t v3_7 = vzipq_u8(v4_7, v4_15);
const uint8x16x2_t v2_0 = vzipq_u8(v3_0.val[0], v3_4.val[0]);
const uint8x16x2_t v2_1 = vzipq_u8(v3_0.val[1], v3_4.val[1]);
const uint8x16x2_t v2_2 = vzipq_u8(v3_1.val[0], v3_5.val[0]);
const uint8x16x2_t v2_3 = vzipq_u8(v3_1.val[1], v3_5.val[1]);
const uint8x16x2_t v2_4 = vzipq_u8(v3_2.val[0], v3_6.val[0]);
const uint8x16x2_t v2_5 = vzipq_u8(v3_2.val[1], v3_6.val[1]);
const uint8x16x2_t v2_6 = vzipq_u8(v3_3.val[0], v3_7.val[0]);
const uint8x16x2_t v2_7 = vzipq_u8(v3_3.val[1], v3_7.val[1]);
const uint8x16x2_t v1_0 = vzipq_u8(v2_0.val[0], v2_4.val[0]);
const uint8x16x2_t v1_1 = vzipq_u8(v2_0.val[1], v2_4.val[1]);
const uint8x16x2_t v1_2 = vzipq_u8(v2_1.val[0], v2_5.val[0]);
const uint8x16x2_t v1_3 = vzipq_u8(v2_1.val[1], v2_5.val[1]);
const uint8x16x2_t v1_4 = vzipq_u8(v2_2.val[0], v2_6.val[0]);
const uint8x16x2_t v1_5 = vzipq_u8(v2_2.val[1], v2_6.val[1]);
const uint8x16x2_t v1_6 = vzipq_u8(v2_3.val[0], v2_7.val[0]);
const uint8x16x2_t v1_7 = vzipq_u8(v2_3.val[1], v2_7.val[1]);
const uint8x16x2_t v0_0 = vzipq_u8(v1_0.val[0], v1_4.val[0]);
const uint8x16x2_t v0_1 = vzipq_u8(v1_0.val[1], v1_4.val[1]);
const uint8x16x2_t v0_2 = vzipq_u8(v1_1.val[0], v1_5.val[0]);
const uint8x16x2_t v0_3 = vzipq_u8(v1_1.val[1], v1_5.val[1]);
const uint8x16x2_t v0_4 = vzipq_u8(v1_2.val[0], v1_6.val[0]);
const uint8x16x2_t v0_5 = vzipq_u8(v1_2.val[1], v1_6.val[1]);
const uint8x16x2_t v0_6 = vzipq_u8(v1_3.val[0], v1_7.val[0]);
const uint8x16x2_t v0_7 = vzipq_u8(v1_3.val[1], v1_7.val[1]);
uint8x8_t v0_low = vget_low_u8(v0_0.val[0]);
uint8x8_t v1_low = vget_low_u8(v0_0.val[1]);
uint8x8_t v2_low = vget_low_u8(v0_1.val[0]);
uint8x8_t v3_low = vget_low_u8(v0_1.val[1]);
uint8x8_t v4_low = vget_low_u8(v0_2.val[0]);
uint8x8_t v5_low = vget_low_u8(v0_2.val[1]);
uint8x8_t v6_low = vget_low_u8(v0_3.val[0]);
uint8x8_t v7_low = vget_low_u8(v0_3.val[1]);
uint8x8_t v8_low = vget_low_u8(v0_4.val[0]);
uint8x8_t v9_low = vget_low_u8(v0_4.val[1]);
uint8x8_t v10_low = vget_low_u8(v0_5.val[0]);
uint8x8_t v11_low = vget_low_u8(v0_5.val[1]);
uint8x8_t v12_low = vget_low_u8(v0_6.val[0]);
uint8x8_t v13_low = vget_low_u8(v0_6.val[1]);
uint8x8_t v14_low = vget_low_u8(v0_7.val[0]);
uint8x8_t v15_low = vget_low_u8(v0_7.val[1]);
if (bh & 8) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 15:
vst1_u8(oN, v15_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 14:
vst1_u8(oN, v14_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 13:
vst1_u8(oN, v13_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 12:
vst1_u8(oN, v12_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 11:
vst1_u8(oN, v11_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 10:
vst1_u8(oN, v10_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 9:
vst1_u8(oN, v9_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 8:
vst1_u8(oN, v8_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 7:
vst1_u8(oN, v7_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_u8(oN, v6_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_u8(oN, v5_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_u8(oN, v4_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_u8(oN, v3_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u8(oN, v2_low); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u8(oN, v1_low);
case 0:
vst1_u8(o, v0_low); o += 8;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vget_high_u8(v0_0.val[0]);
v1_low = vget_high_u8(v0_0.val[1]);
v2_low = vget_high_u8(v0_1.val[0]);
v3_low = vget_high_u8(v0_1.val[1]);
v4_low = vget_high_u8(v0_2.val[0]);
v5_low = vget_high_u8(v0_2.val[1]);
v6_low = vget_high_u8(v0_3.val[0]);
v7_low = vget_high_u8(v0_3.val[1]);
v8_low = vget_high_u8(v0_4.val[0]);
v9_low = vget_high_u8(v0_4.val[1]);
v10_low = vget_high_u8(v0_5.val[0]);
v11_low = vget_high_u8(v0_5.val[1]);
v12_low = vget_high_u8(v0_6.val[0]);
v13_low = vget_high_u8(v0_6.val[1]);
v14_low = vget_high_u8(v0_7.val[0]);
v15_low = vget_high_u8(v0_7.val[1]);
}
if (bh & 4) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 15:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v15_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 14:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v14_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 13:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v13_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 12:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v12_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 11:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v11_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 10:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v10_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 9:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v9_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 8:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v8_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 7:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
v8_low = vext_u8(v8_low, v8_low, 4);
v9_low = vext_u8(v9_low, v9_low, 4);
v10_low = vext_u8(v10_low, v10_low, 4);
v11_low = vext_u8(v11_low, v11_low, 4);
v12_low = vext_u8(v12_low, v12_low, 4);
v13_low = vext_u8(v13_low, v13_low, 4);
v14_low = vext_u8(v14_low, v14_low, 4);
v15_low = vext_u8(v15_low, v15_low, 4);
}
if (bh & 2) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 15:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v15_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 14:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v14_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 13:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v13_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 12:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v12_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 11:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v11_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 10:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v10_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 9:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v9_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 8:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v8_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 7:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v1_low), 0);
case 0:
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
v8_low = vext_u8(v8_low, v8_low, 2);
v9_low = vext_u8(v9_low, v9_low, 2);
v10_low = vext_u8(v10_low, v10_low, 2);
v11_low = vext_u8(v11_low, v11_low, 2);
v12_low = vext_u8(v12_low, v12_low, 2);
v13_low = vext_u8(v13_low, v13_low, 2);
v14_low = vext_u8(v14_low, v14_low, 2);
v15_low = vext_u8(v15_low, v15_low, 2);
}
if (bh & 1) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 15:
vst1_lane_u8(oN, v15_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 14:
vst1_lane_u8(oN, v14_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 13:
vst1_lane_u8(oN, v13_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 12:
vst1_lane_u8(oN, v12_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 11:
vst1_lane_u8(oN, v11_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 10:
vst1_lane_u8(oN, v10_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 9:
vst1_lane_u8(oN, v9_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 8:
vst1_lane_u8(oN, v8_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 7:
vst1_lane_u8(oN, v7_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u8(oN, v6_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u8(oN, v5_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u8(oN, v4_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u8(oN, v3_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u8(oN, v2_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u8(oN, v1_low, 0);
case 0:
vst1_lane_u8(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
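The zip network above generalizes: log2(N) rounds of zips transpose an NxN byte tile held in registers. Below is a self-contained (hedged) sketch of the same scheme for a dense 8x8 tile, mirroring the pairing used by the 8x8 NEON kernel later in this collection, with all loads full and no remainder handling.

#include <arm_neon.h>
#include <stdint.h>

// Sketch: transpose a dense 8x8 byte tile with 3 rounds of vzip_u8
// (the 16x16 kernel above does the same with 4 rounds of vzipq_u8).
static void transpose_8x8_u8(const uint8_t in[64], uint8_t out[64])
{
  uint8x8_t r[8];
  for (int i = 0; i < 8; i++) r[i] = vld1_u8(in + 8 * i);
  // Round 1: pair rows 4 apart.
  const uint8x8x2_t a0 = vzip_u8(r[0], r[4]);
  const uint8x8x2_t a1 = vzip_u8(r[1], r[5]);
  const uint8x8x2_t a2 = vzip_u8(r[2], r[6]);
  const uint8x8x2_t a3 = vzip_u8(r[3], r[7]);
  // Round 2: interleave the intermediate results.
  const uint8x8x2_t b0 = vzip_u8(a0.val[0], a2.val[0]);
  const uint8x8x2_t b1 = vzip_u8(a0.val[1], a2.val[1]);
  const uint8x8x2_t b2 = vzip_u8(a1.val[0], a3.val[0]);
  const uint8x8x2_t b3 = vzip_u8(a1.val[1], a3.val[1]);
  // Round 3: c0..c3 now hold the transposed rows, two per .val pair.
  const uint8x8x2_t c0 = vzip_u8(b0.val[0], b2.val[0]);
  const uint8x8x2_t c1 = vzip_u8(b0.val[1], b2.val[1]);
  const uint8x8x2_t c2 = vzip_u8(b1.val[0], b3.val[0]);
  const uint8x8x2_t c3 = vzip_u8(b1.val[1], b3.val[1]);
  vst1_u8(out +  0, c0.val[0]); vst1_u8(out +  8, c0.val[1]);
  vst1_u8(out + 16, c1.val[0]); vst1_u8(out + 24, c1.val[1]);
  vst1_u8(out + 32, c2.val[0]); vst1_u8(out + 40, c2.val[1]);
  vst1_u8(out + 48, c3.val[0]); vst1_u8(out + 56, c3.val[1]);
}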
| 23,468 | 48.098326 | 127 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-1x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__1x2_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 1;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - block_height * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
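A minimal (hedged) usage sketch for the 1x2 scalar kernel above: transposing a 3x2 byte matrix into 2x3. Strides are given in bytes, and passing params = NULL is assumed to be acceptable since this variant never dereferences it.

#include <stdint.h>
#include <stdio.h>
#include <xnnpack/transpose.h>  // declares the ukernel above

int main(void)
{
  const uint8_t in[3][2] = {{1, 2}, {3, 4}, {5, 6}};
  uint8_t out[2][3];
  xnn_x8_transposec_ukernel__1x2_scalar_int(
      &in[0][0], &out[0][0],
      /*input_stride=*/2, /*output_stride=*/3,
      /*block_width=*/2, /*block_height=*/3, /*params=*/NULL);
  printf("%d %d %d\n", out[0][0], out[0][1], out[0][2]);  // prints: 1 3 5
  return 0;
}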
| 1,726 | 28.775862 | 89 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-1x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__1x4_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 1;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - block_height * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
int8_t* o2 = (int8_t*) ((uintptr_t) o1 + output_stride);
int8_t* o3 = (int8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o3++ = i0[3];
*o2++ = i0[2];
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
o2 = (int8_t*) ((uintptr_t) o2 + output_reset);
o3 = (int8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,121 | 29.314286 | 89 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-2x1-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__2x1_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 2;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
int8_t* o0 = (int8_t*) output;
do {
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o0[0] = i0[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 1,831 | 30.050847 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-2x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__2x2_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o1++ = i0[1];
*o1++ = i1[1];
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o1[0] = i0[1];
o0[0] = i0[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,070 | 29.910448 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-2x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__2x4_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 2;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
int8_t* o2 = (int8_t*) ((uintptr_t) o1 + output_stride);
int8_t* o3 = (int8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o3++ = i0[3];
*o3++ = i1[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o3[0] = i0[3];
o2[0] = i0[2];
o1[0] = i0[1];
o0[0] = i0[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
o2 = (int8_t*) ((uintptr_t) o2 + output_reset);
o3 = (int8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,549 | 29.722892 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-4x1-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__4x1_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 4;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
int8_t* o0 = (int8_t*) output;
do {
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i = i0;
if (bh & 2) {
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o0[0] = i[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,369 | 31.027027 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-4x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__4x2_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 4;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i = i0;
if (bh & 2) {
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,706 | 30.114943 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-4x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__4x4_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int8_t));
assert(input_stride >= block_width * sizeof(int8_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int8_t);
const size_t input_offset = tile_height * input_stride;
const int8_t* i0 = (const int8_t*) input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
int8_t* o0 = (int8_t*) output;
int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride);
int8_t* o2 = (int8_t*) ((uintptr_t) o1 + output_stride);
int8_t* o3 = (int8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o3++ = i0[3];
*o3++ = i1[3];
*o3++ = i2[3];
*o3++ = i3[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o2++ = i2[2];
*o2++ = i3[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i = i0;
if (bh & 2) {
o3[0] = i0[3];
o3[1] = i1[3];
o3 += 2;
o2[0] = i0[2];
o2[1] = i1[2];
o2 += 2;
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o3[0] = i[3];
o2[0] = i[2];
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
o0 = (int8_t*) ((uintptr_t) o0 + output_reset);
o1 = (int8_t*) ((uintptr_t) o1 + output_reset);
o2 = (int8_t*) ((uintptr_t) o2 + output_reset);
o3 = (int8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
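These scalar kernels are annotated XNN_OOB_READS, so they may read up to tile_width - 1 bytes past the end of each input row. The (hedged) cross-check below therefore pads each input row by 3 bytes and compares the 4x4 kernel above against a naive transpose on a 6x5 block.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack/transpose.h>  // declares the ukernel above

int main(void)
{
  enum { H = 6, W = 5, PAD = 3 };  // PAD absorbs the kernel's tail over-reads
  uint8_t in[H][W + PAD];
  uint8_t expected[W][H];
  uint8_t actual[W][H];
  memset(in, 0, sizeof(in));
  for (int r = 0; r < H; r++) {
    for (int c = 0; c < W; c++) {
      in[r][c] = (uint8_t) (r * W + c);
      expected[c][r] = in[r][c];  // naive reference transpose
    }
  }
  xnn_x8_transposec_ukernel__4x4_scalar_int(
      &in[0][0], &actual[0][0],
      /*input_stride=*/W + PAD, /*output_stride=*/H,
      /*block_width=*/W, /*block_height=*/H, /*params=*/NULL);
  assert(memcmp(expected, actual, sizeof(expected)) == 0);
  return 0;
}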
| 3,381 | 28.929204 | 108 | c |
XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-multi-dec-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_multi_dec_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t) - tile_hbytes;
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const uint8_t* i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
uint8_t* o = (uint8_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_offset);
const uint8x8_t v3_1 = vld1_u8(i1); i1 = (uint8_t*) ((uintptr_t) i1 + input_offset);
const uint8x8_t v3_2 = vld1_u8(i2); i2 = (uint8_t*) ((uintptr_t) i2 + input_offset);
const uint8x8_t v3_3 = vld1_u8(i3); i3 = (uint8_t*) ((uintptr_t) i3 + input_offset);
const uint8x8_t v3_4 = vld1_u8(i4); i4 = (uint8_t*) ((uintptr_t) i4 + input_offset);
const uint8x8_t v3_5 = vld1_u8(i5); i5 = (uint8_t*) ((uintptr_t) i5 + input_offset);
const uint8x8_t v3_6 = vld1_u8(i6); i6 = (uint8_t*) ((uintptr_t) i6 + input_offset);
const uint8x8_t v3_7 = vld1_u8(i7); i7 = (uint8_t*) ((uintptr_t) i7 + input_offset);
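      // Three rounds of vzip_u8 implement the 8x8 byte transpose: after the
      // last round, each v0_k.val[j] holds one input column, i.e. one
      // transposed output row.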
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
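      // "dec" store scheme: jump to the last valid output row, then step back
      // one row per store, with each step guarded by block_width.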
o = (uint8_t*) ((uintptr_t) o + oN_offset);
vst1_u8(o, v0_3.val[1]);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_3.val[0]);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_2.val[1]);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_2.val[0]);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_0.val[0]);
}
o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
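      // Tail of 1-7 rows: clamp the unread row pointers to i0 and zero-pad
      // row 7, then reuse the same zip network.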
const uint8x8_t v3_0 = vld1_u8(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint8x8_t v3_2 = vld1_u8(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint8x8_t v3_3 = vld1_u8(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint8x8_t v3_4 = vld1_u8(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint8x8_t v3_5 = vld1_u8(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u8(o, v7_low, 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v6_low, 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v5_low, 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v4_low, 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v0_low, 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}

XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-multi-mov-zip-neon.c

// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_multi_mov_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t) - tile_hbytes;
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const uint8_t* i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
uint8_t* o = (uint8_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_offset);
const uint8x8_t v3_1 = vld1_u8(i1); i1 = (uint8_t*) ((uintptr_t) i1 + input_offset);
const uint8x8_t v3_2 = vld1_u8(i2); i2 = (uint8_t*) ((uintptr_t) i2 + input_offset);
const uint8x8_t v3_3 = vld1_u8(i3); i3 = (uint8_t*) ((uintptr_t) i3 + input_offset);
const uint8x8_t v3_4 = vld1_u8(i4); i4 = (uint8_t*) ((uintptr_t) i4 + input_offset);
const uint8x8_t v3_5 = vld1_u8(i5); i5 = (uint8_t*) ((uintptr_t) i5 + input_offset);
const uint8x8_t v3_6 = vld1_u8(i6); i6 = (uint8_t*) ((uintptr_t) i6 + input_offset);
const uint8x8_t v3_7 = vld1_u8(i7); i7 = (uint8_t*) ((uintptr_t) i7 + input_offset);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
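      // "mov" store scheme: precompute the decremented pointer in oN, then
      // conditionally commit it to o (intended to compile to a conditional
      // move rather than a branch).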
o = (uint8_t*) ((uintptr_t) o + oN_offset);
vst1_u8(o, v0_3.val[1]);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_u8(o, v0_3.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_u8(o, v0_2.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_u8(o, v0_2.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_u8(o, v0_1.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u8(o, v0_1.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u8(o, v0_0.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u8(o, v0_0.val[0]);
}
o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint8x8_t v3_2 = vld1_u8(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint8x8_t v3_3 = vld1_u8(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint8x8_t v3_4 = vld1_u8(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint8x8_t v3_5 = vld1_u8(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v7_low), 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v6_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v5_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v4_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v3_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v2_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v1_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v7_low), 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v6_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v5_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v4_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v3_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v2_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v1_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u8(o, v7_low, 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u8(o, v6_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u8(o, v5_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u8(o, v4_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u8(o, v3_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u8(o, v2_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u8(o, v1_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u8(o, v0_low, 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}

XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-multi-switch-zip-neon.c

// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_multi_switch_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const uint8_t* i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
uint8_t* o = (uint8_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_offset);
const uint8x8_t v3_1 = vld1_u8(i1); i1 = (uint8_t*) ((uintptr_t) i1 + input_offset);
const uint8x8_t v3_2 = vld1_u8(i2); i2 = (uint8_t*) ((uintptr_t) i2 + input_offset);
const uint8x8_t v3_3 = vld1_u8(i3); i3 = (uint8_t*) ((uintptr_t) i3 + input_offset);
const uint8x8_t v3_4 = vld1_u8(i4); i4 = (uint8_t*) ((uintptr_t) i4 + input_offset);
const uint8x8_t v3_5 = vld1_u8(i5); i5 = (uint8_t*) ((uintptr_t) i5 + input_offset);
const uint8x8_t v3_6 = vld1_u8(i6); i6 = (uint8_t*) ((uintptr_t) i6 + input_offset);
const uint8x8_t v3_7 = vld1_u8(i7); i7 = (uint8_t*) ((uintptr_t) i7 + input_offset);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + oN_stride);
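      // "switch" store scheme: fall through from case 'rem' down to case 0 so
      // exactly rem + 1 output rows are written.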
switch (rem) {
case 7:
vst1_u8(oN, v0_3.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_u8(oN, v0_3.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_u8(oN, v0_2.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_u8(oN, v0_2.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_u8(oN, v0_1.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u8(oN, v0_1.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u8(oN, v0_0.val[1]);
case 0:
vst1_u8(o, v0_0.val[0]); o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint8x8_t v3_2 = vld1_u8(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint8x8_t v3_3 = vld1_u8(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint8x8_t v3_4 = vld1_u8(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint8x8_t v3_5 = vld1_u8(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v1_low), 0);
case 0:
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u8(oN, v7_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u8(oN, v6_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u8(oN, v5_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u8(oN, v4_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u8(oN, v3_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u8(oN, v2_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u8(oN, v1_low, 0);
case 0:
vst1_lane_u8(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint8_t*) ((uintptr_t) i6 + input_stride);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}

XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-reuse-dec-zip-neon.c

// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t) - tile_hbytes;
const uint8_t* i0 = input;
uint8_t* o = (uint8_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
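    // "reuse" load scheme: a single pointer, i0, strides through all eight
    // rows of each tile instead of keeping eight row pointers.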
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_1 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_2 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_3 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_4 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_5 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_6 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_7 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
o = (uint8_t*) ((uintptr_t) o + oN_offset);
vst1_u8(o, v0_3.val[1]);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_3.val[0]);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_2.val[1]);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_2.val[0]);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u8(o, v0_0.val[0]);
}
o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint8x8_t v3_2 = vld1_u8(i2);
const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint8x8_t v3_3 = vld1_u8(i3);
const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint8x8_t v3_4 = vld1_u8(i4);
const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint8x8_t v3_5 = vld1_u8(i5);
const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u8(o, v7_low, 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v6_low, 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v5_low, 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v4_low, 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u8(o, v0_low, 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}

XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-reuse-mov-zip-neon.c

// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_reuse_mov_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t) - tile_hbytes;
const uint8_t* i0 = input;
uint8_t* o = (uint8_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
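    // "reuse" loads (single i0 pointer) combined with "mov"-style conditional
    // pointer commits on the store side.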
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_1 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_2 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_3 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_4 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_5 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_6 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_7 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
o = (uint8_t*) ((uintptr_t) o + oN_offset);
vst1_u8(o, v0_3.val[1]);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_u8(o, v0_3.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_u8(o, v0_2.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_u8(o, v0_2.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_u8(o, v0_1.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u8(o, v0_1.val[0]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u8(o, v0_0.val[1]);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u8(o, v0_0.val[0]);
}
o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint8x8_t v3_2 = vld1_u8(i2);
const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint8x8_t v3_3 = vld1_u8(i3);
const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint8x8_t v3_4 = vld1_u8(i4);
const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint8x8_t v3_5 = vld1_u8(i5);
const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v7_low), 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v6_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v5_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v4_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v3_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v2_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v1_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v7_low), 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v6_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v5_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v4_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v3_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v2_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v1_low), 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint8_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u8(o, v7_low, 0);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u8(o, v6_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u8(o, v5_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u8(o, v4_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u8(o, v3_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u8(o, v2_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u8(o, v1_low, 0);
oN = (uint8_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u8(o, v0_low, 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}

XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-reuse-multi-zip-neon.c

// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_reuse_multi_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t);
const uint8_t* i0 = input;
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
uint8_t* o4 = (uint8_t*) ((uintptr_t) o3 + output_stride);
uint8_t* o5 = (uint8_t*) ((uintptr_t) o4 + output_stride);
uint8_t* o6 = (uint8_t*) ((uintptr_t) o5 + output_stride);
uint8_t* o7 = (uint8_t*) ((uintptr_t) o6 + output_stride);
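  // "multi" store scheme: eight persistent output row pointers (o0-o7) that
  // advance independently; unused ones are aliased to o0 for narrow blocks.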
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 4) {
o4 = o0;
}
if XNN_UNPREDICTABLE(block_width < 6) {
o5 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 6) {
o6 = o0;
}
if XNN_UNPREDICTABLE(block_width < 8) {
o7 = o0;
}
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_1 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_2 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_3 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_4 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_5 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_6 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_7 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
vst1_u8(o7, v0_3.val[1]); o7 = (uint8_t*) ((uintptr_t) o7 + tile_hbytes);
vst1_u8(o6, v0_3.val[0]); o6 = (uint8_t*) ((uintptr_t) o6 + tile_hbytes);
vst1_u8(o5, v0_2.val[1]); o5 = (uint8_t*) ((uintptr_t) o5 + tile_hbytes);
vst1_u8(o4, v0_2.val[0]); o4 = (uint8_t*) ((uintptr_t) o4 + tile_hbytes);
vst1_u8(o3, v0_1.val[1]); o3 = (uint8_t*) ((uintptr_t) o3 + tile_hbytes);
vst1_u8(o2, v0_1.val[0]); o2 = (uint8_t*) ((uintptr_t) o2 + tile_hbytes);
vst1_u8(o1, v0_0.val[1]); o1 = (uint8_t*) ((uintptr_t) o1 + tile_hbytes);
vst1_u8(o0, v0_0.val[0]); o0 = (uint8_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint8x8_t v3_2 = vld1_u8(i2);
const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint8x8_t v3_3 = vld1_u8(i3);
const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint8x8_t v3_4 = vld1_u8(i4);
const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint8x8_t v3_5 = vld1_u8(i5);
const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
vst1_lane_u32((void*) o7, vreinterpret_u32_u8(v7_low), 0); o7 += 4;
vst1_lane_u32((void*) o6, vreinterpret_u32_u8(v6_low), 0); o6 += 4;
vst1_lane_u32((void*) o5, vreinterpret_u32_u8(v5_low), 0); o5 += 4;
vst1_lane_u32((void*) o4, vreinterpret_u32_u8(v4_low), 0); o4 += 4;
vst1_lane_u32((void*) o3, vreinterpret_u32_u8(v3_low), 0); o3 += 4;
vst1_lane_u32((void*) o2, vreinterpret_u32_u8(v2_low), 0); o2 += 4;
vst1_lane_u32((void*) o1, vreinterpret_u32_u8(v1_low), 0); o1 += 4;
vst1_lane_u32((void*) o0, vreinterpret_u32_u8(v0_low), 0); o0 += 4;
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
vst1_lane_u16((void*) o7, vreinterpret_u16_u8(v7_low), 0); o7 += 2;
vst1_lane_u16((void*) o6, vreinterpret_u16_u8(v6_low), 0); o6 += 2;
vst1_lane_u16((void*) o5, vreinterpret_u16_u8(v5_low), 0); o5 += 2;
vst1_lane_u16((void*) o4, vreinterpret_u16_u8(v4_low), 0); o4 += 2;
vst1_lane_u16((void*) o3, vreinterpret_u16_u8(v3_low), 0); o3 += 2;
vst1_lane_u16((void*) o2, vreinterpret_u16_u8(v2_low), 0); o2 += 2;
vst1_lane_u16((void*) o1, vreinterpret_u16_u8(v1_low), 0); o1 += 2;
vst1_lane_u16((void*) o0, vreinterpret_u16_u8(v0_low), 0); o0 += 2;
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
vst1_lane_u8(o7, v7_low, 0);
vst1_lane_u8(o6, v6_low, 0);
vst1_lane_u8(o5, v5_low, 0);
vst1_lane_u8(o4, v4_low, 0);
vst1_lane_u8(o3, v3_low, 0);
vst1_lane_u8(o2, v2_low, 0);
vst1_lane_u8(o1, v1_low, 0);
vst1_lane_u8(o0, v0_low, 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
o4 = (uint8_t*) ((uintptr_t) o4 + output_reset);
o5 = (uint8_t*) ((uintptr_t) o5 + output_reset);
o6 = (uint8_t*) ((uintptr_t) o6 + output_reset);
o7 = (uint8_t*) ((uintptr_t) o7 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 9,469 | 41.850679 | 109 | c | XNNPACK | XNNPACK-master/src/x8-transposec/gen/x8-transposec-8x8-reuse-switch-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transposec_ukernel__8x8_reuse_switch_zip_neon(
const uint8_t* input,
uint8_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint8_t));
assert(input_stride >= block_width * sizeof(uint8_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint8_t);
const size_t tile_wbytes = tile_width * sizeof(uint8_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t);
const uint8_t* i0 = input;
uint8_t* o = (uint8_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
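    // rem is the number of output rows written for this group of input
    // columns, minus one; the fall-through switches below start at the last
    // row (o + rem * output_stride) and step back one output row per case.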
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_1 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_2 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_3 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_4 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_5 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_6 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8x8_t v3_7 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
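      // Three rounds of pairwise zips transpose the 8x8 byte tile: rows 4
      // apart, then 2 apart, then adjacent, leaving the 8 output rows in
      // v0_0.val[0] .. v0_3.val[1].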
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8_t *oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_u8(oN, v0_3.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_u8(oN, v0_3.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_u8(oN, v0_2.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_u8(oN, v0_2.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_u8(oN, v0_1.val[1]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u8(oN, v0_1.val[0]); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u8(oN, v0_0.val[1]);
case 0:
vst1_u8(o, v0_0.val[0]); o = (uint8_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint8x8_t v3_0 = vld1_u8(i0);
const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint8x8_t v3_1 = vld1_u8(i1);
const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint8x8_t v3_2 = vld1_u8(i2);
const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint8x8_t v3_3 = vld1_u8(i3);
const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint8x8_t v3_4 = vld1_u8(i4);
const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint8x8_t v3_5 = vld1_u8(i5);
const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint8x8_t v3_6 = vld1_u8(i6);
const uint8x8_t v3_7 = vmov_n_u8(0);
const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);
const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);
uint8x8_t v0_low = v0_0.val[0];
uint8x8_t v1_low = v0_0.val[1];
uint8x8_t v2_low = v0_1.val[0];
uint8x8_t v3_low = v0_1.val[1];
uint8x8_t v4_low = v0_2.val[0];
uint8x8_t v5_low = v0_2.val[1];
uint8x8_t v6_low = v0_3.val[0];
uint8x8_t v7_low = v0_3.val[1];
if (bh & 4) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u8(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 4);
v1_low = vext_u8(v1_low, v1_low, 4);
v2_low = vext_u8(v2_low, v2_low, 4);
v3_low = vext_u8(v3_low, v3_low, 4);
v4_low = vext_u8(v4_low, v4_low, 4);
v5_low = vext_u8(v5_low, v5_low, 4);
v6_low = vext_u8(v6_low, v6_low, 4);
v7_low = vext_u8(v7_low, v7_low, 4);
}
if (bh & 2) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v7_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v6_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v5_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v4_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v3_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v2_low), 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16((void*) oN, vreinterpret_u16_u8(v1_low), 0);
case 0:
vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u8(v0_low, v0_low, 2);
v1_low = vext_u8(v1_low, v1_low, 2);
v2_low = vext_u8(v2_low, v2_low, 2);
v3_low = vext_u8(v3_low, v3_low, 2);
v4_low = vext_u8(v4_low, v4_low, 2);
v5_low = vext_u8(v5_low, v5_low, 2);
v6_low = vext_u8(v6_low, v6_low, 2);
v7_low = vext_u8(v7_low, v7_low, 2);
}
if (bh & 1) {
uint8_t* oN = (uint8_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u8(oN, v7_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u8(oN, v6_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u8(oN, v5_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u8(oN, v4_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u8(oN, v3_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u8(oN, v2_low, 0); oN = (uint8_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u8(oN, v1_low, 0);
case 0:
vst1_lane_u8(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o = (uint8_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 10,560 | 42.106122 | 126 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x2-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x2_ukernel__neon(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
uint8_t* o = output;
if (n >= 8) {
do {
uint8x8x2_t vxy;
vxy.val[0] = vld1_u8(x); x += 8;
vxy.val[1] = vld1_u8(y); y += 8;
      vst2_u8(o, vxy); o += 16;
n -= 8;
} while (n >= 8);
if (n != 0) {
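      // Fewer than 8 bytes remain: rewind so the final 8-byte vectors end
      // exactly at each stream's tail. The overlapped leading bytes are
      // recomputed and re-stored with identical values, so the overlap is
      // harmless.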
const size_t address_increment = n - 8;
uint8x8x2_t vxy;
vxy.val[0] = vld1_u8((const uint8_t*) ((uintptr_t) x + address_increment));
vxy.val[1] = vld1_u8((const uint8_t*) ((uintptr_t) y + address_increment));
vst2_u8((uint8_t*) ((uintptr_t) o + address_increment * 2), vxy);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
o[0] = vx;
o[1] = vy;
o += 2;
} while (--n != 0);
}
}
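A minimal scalar reference for the x2 zip contract above, written for this document and not part of XNNPACK (the function name is invented): stream x occupies input[0..n), stream y occupies input[n..2n), and the output interleaves them byte by byte.
#include <stddef.h>
#include <stdint.h>
static void x8_zip_x2_reference(size_t n, const uint8_t* input, uint8_t* output)
{
  const uint8_t* x = input;      // first stream
  const uint8_t* y = input + n;  // second stream starts n bytes in
  for (size_t i = 0; i < n; i++) {
    output[2 * i + 0] = x[i];
    output[2 * i + 1] = y[i];
  }
}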
| 1,186 | 23.729167 | 81 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x2-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <emmintrin.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x2_ukernel__sse2(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
uint8_t* o = output;
if (n >= 16) {
do {
const __m128i vx = _mm_loadu_si128((const __m128i*) x);
x += 16;
const __m128i vy = _mm_loadu_si128((const __m128i*) y);
y += 16;
const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
_mm_storeu_si128((__m128i*) o, vxy_lo);
_mm_storeu_si128((__m128i*) (o + 16), vxy_hi);
o = (void*) ((uintptr_t) o + 32);
n -= 16;
} while (n >= 16);
if (n != 0) {
const size_t address_increment = n - 16;
const __m128i vx = _mm_loadu_si128((const __m128i*) ((uintptr_t) x + address_increment));
const __m128i vy = _mm_loadu_si128((const __m128i*) ((uintptr_t) y + address_increment));
const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
o = (void*) ((uintptr_t) o + address_increment * 2);
_mm_storeu_si128((__m128i*) o, vxy_lo);
_mm_storeu_si128((__m128i*) o + 1, vxy_hi);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
o[0] = vx;
o[1] = vy;
o += 2;
} while (--n != 0);
}
}
| 1,662 | 28.696429 | 95 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x3-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x3_ukernel__neon(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
const uint8_t* z = (const uint8_t*) ((uintptr_t) y + n);
uint8_t* o = output;
if (n >= 8) {
do {
uint8x8x3_t vxyz;
vxyz.val[0] = vld1_u8(x); x += 8;
vxyz.val[1] = vld1_u8(y); y += 8;
vxyz.val[2] = vld1_u8(z); z += 8;
vst3_u8(o, vxyz); o += 24;
n -= 8;
} while (n >= 8);
if (n != 0) {
const size_t address_increment = n - 8;
uint8x8x3_t vxyz;
vxyz.val[0] = vld1_u8((const uint8_t*) ((uintptr_t) x + address_increment));
vxyz.val[1] = vld1_u8((const uint8_t*) ((uintptr_t) y + address_increment));
vxyz.val[2] = vld1_u8((const uint8_t*) ((uintptr_t) z + address_increment));
vst3_u8((uint8_t*) ((uintptr_t) o + address_increment * 3), vxyz);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
const uint8_t vz = *z++;
o[0] = vx;
o[1] = vy;
o[2] = vz;
o += 3;
} while (--n != 0);
}
}
| 1,423 | 25.867925 | 82 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x3-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <emmintrin.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x3_ukernel__sse2(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
const uint8_t* z = (const uint8_t*) ((uintptr_t) y + n);
uint8_t* o = output;
if (n >= 16) {
const __m128i vmask0x00FF00FF = _mm_set1_epi16(0x00FF);
const __m128i vmask0x0000FFFF = _mm_set1_epi32(0x0000FFFF);
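    // vmask0x00FF00FF keeps the even-indexed byte of each 16-bit lane and
    // vmask0x0000FFFF keeps the even 16-bit word of each 32-bit lane; SSE2 has
    // no 3-element unpack, so the 3-way interleave below is assembled from
    // masked 2-way blends and shuffles.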
do {
// vx = ( x15, x14, x13, x12, x11, x10, x9, x8, x7, x6, x5, x4, x3, x2, x1, x0 )
const __m128i vx = _mm_loadu_si128((const __m128i*) x);
x += 16;
// vy = ( y15, y14, y13, y12, y11, y10, y9, y8, y7, y6, y5, y4, y3, y2, y1, y0 )
const __m128i vy = _mm_loadu_si128((const __m128i*) y);
y += 16;
// vz = ( z15, z14, z13, z12, z11, z10, z9, z8, z7, z6, z5, z4, z3, z2, z1, z0 )
const __m128i vz = _mm_loadu_si128((const __m128i*) z);
z += 16;
// vxeye = ( y14, x14, y12, x12, y10, x10, y8, x8, y6, x6, y4, x4, y2, x2, y0, x0 )
const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8));
// vyozo = ( z15, y15, z13, y13, z11, y11, z9, y9, z7, y7, z5, y5, z3, y3, z1, y1 )
const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8));
      // vzexo = ( x15, z14, x13, z12, x11, z10, x9, z8, x7, z6, x5, z4, x3, z2, x1, z0 )
const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00FF00FF, vx));
// vxeyezexo = ( x13, z12, y12, x12, x9, z8, y8, x8, x5, z4, y4, x4, x1, z0, y0, x0 )
const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo, 16));
// vyozoxeye = ( y14, x14, z13, y13, y10, x10, z9, y9, y6, x6, z5, y5, y2, x2, z1, y1 )
const __m128i vyozoxeye = _mm_or_si128(_mm_and_si128(vyozo, vmask0x0000FFFF), _mm_andnot_si128(vmask0x0000FFFF, vxeye));
// vzexoyozo = ( z15, y15, x15, z14, z11, y11, x11, z10, z7, y7, x7, z6, z3, y3, x3, z2 )
const __m128i vzexoyozo = _mm_or_si128(_mm_andnot_si128(vmask0x0000FFFF, vyozo), _mm_srli_epi32(vzexo, 16));
// vtemp0 = ( x13, z12, y12, x12, x5, z4, y4, x4, z11, y11, x11, z10, z3, y3, x3, z2 )
const __m128i vtemp0 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vzexoyozo), _mm_castsi128_ps(vxeyezexo), _MM_SHUFFLE(3, 1, 2, 0)));
// vtemp1 = ( y10, x10, z9, y9, y2, x2, z1, y1, x9, z8, y8, x8, x1, z0, y0, x0 )
const __m128i vtemp1 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vxeyezexo), _mm_castsi128_ps(vyozoxeye), _MM_SHUFFLE(2, 0, 2, 0)));
// vtemp2 = ( z15, y15, x15, z14, z7, y7, x7, z6, y14, x14, z13, y13, y6, x6, z5, y5 )
const __m128i vtemp2 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vyozoxeye), _mm_castsi128_ps(vzexoyozo), _MM_SHUFFLE(3, 1, 3, 1)));
// vxyz0 = ( x5, z4, y4, x4, z3, y3, x3, z2, y2, x2, z1, y1, x1, z0, y0, x0 )
const __m128i vxyz0 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp1), _mm_castsi128_ps(vtemp0), _MM_SHUFFLE(2, 0, 2, 0)));
// vxyz1 = ( y10, x10, z9, y9, x9, z8, y8, x8, z7, y7, x7, z6, y6, x6, z5, y5 )
const __m128i vxyz1 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp2), _mm_castsi128_ps(vtemp1), _MM_SHUFFLE(3, 1, 2, 0)));
// vxyz2 = ( z15, y15, x15, z14, y14, x14, z13, y13, x13, z12, y12, x12, z11, y11, x11, z10 )
const __m128i vxyz2 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp0), _mm_castsi128_ps(vtemp2), _MM_SHUFFLE(3, 1, 3, 1)));
_mm_storeu_si128((__m128i*) o, vxyz0);
_mm_storeu_si128((__m128i*) o + 1, vxyz1);
_mm_storeu_si128((__m128i*) o + 2, vxyz2);
o += 48;
n -= 16;
} while (n >= 16);
if (n != 0) {
const size_t address_increment = n - 16;
// vx = ( x15, x14, x13, x12, x11, x10, x9, x8, x7, x6, x5, x4, x3, x2, x1, x0 )
const __m128i vx = _mm_loadu_si128((const __m128i*) ((uintptr_t) x + address_increment));
// vy = ( y15, y14, y13, y12, y11, y10, y9, y8, y7, y6, y5, y4, y3, y2, y1, y0 )
const __m128i vy = _mm_loadu_si128((const __m128i*) ((uintptr_t) y + address_increment));
// vz = ( z15, z14, z13, z12, z11, z10, z9, z8, z7, z6, z5, z4, z3, z2, z1, z0 )
const __m128i vz = _mm_loadu_si128((const __m128i*) ((uintptr_t) z + address_increment));
// vxeye = ( y14, x14, y12, x12, y10, x10, y8, x8, y6, x6, y4, x4, y2, x2, y0, x0 )
const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8));
// vyozo = ( z15, y15, z13, y13, z11, y11, z9, y9, z7, y7, z5, y5, z3, y3, z1, y1 )
const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8));
      // vzexo = ( x15, z14, x13, z12, x11, z10, x9, z8, x7, z6, x5, z4, x3, z2, x1, z0 )
const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00FF00FF, vx));
// vxeyezexo = ( x13, z12, y12, x12, x9, z8, y8, x8, x5, z4, y4, x4, x1, z0, y0, x0 )
const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo, 16));
// vyozoxeye = ( y14, x14, z13, y13, y10, x10, z9, y9, y6, x6, z5, y5, y2, x2, z1, y1 )
const __m128i vyozoxeye = _mm_or_si128(_mm_and_si128(vyozo, vmask0x0000FFFF), _mm_andnot_si128(vmask0x0000FFFF, vxeye));
// vzexoyozo = ( z15, y15, x15, z14, z11, y11, x11, z10, z7, y7, x7, z6, z3, y3, x3, z2 )
const __m128i vzexoyozo = _mm_or_si128(_mm_andnot_si128(vmask0x0000FFFF, vyozo), _mm_srli_epi32(vzexo, 16));
// vtemp0 = ( x13, z12, y12, x12, x5, z4, y4, x4, z11, y11, x11, z10, z3, y3, x3, z2 )
const __m128i vtemp0 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vzexoyozo), _mm_castsi128_ps(vxeyezexo), _MM_SHUFFLE(3, 1, 2, 0)));
// vtemp1 = ( y10, x10, z9, y9, y2, x2, z1, y1, x9, z8, y8, x8, x1, z0, y0, x0 )
const __m128i vtemp1 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vxeyezexo), _mm_castsi128_ps(vyozoxeye), _MM_SHUFFLE(2, 0, 2, 0)));
// vtemp2 = ( z15, y15, x15, z14, z7, y7, x7, z6, y14, x14, z13, y13, y6, x6, z5, y5 )
const __m128i vtemp2 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vyozoxeye), _mm_castsi128_ps(vzexoyozo), _MM_SHUFFLE(3, 1, 3, 1)));
// vxyz0 = ( x5, z4, y4, x4, z3, y3, x3, z2, y2, x2, z1, y1, x1, z0, y0, x0 )
const __m128i vxyz0 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp1), _mm_castsi128_ps(vtemp0), _MM_SHUFFLE(2, 0, 2, 0)));
// vxyz1 = ( y10, x10, z9, y9, x9, z8, y8, x8, z7, y7, x7, z6, y6, x6, z5, y5 )
const __m128i vxyz1 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp2), _mm_castsi128_ps(vtemp1), _MM_SHUFFLE(3, 1, 2, 0)));
// vxyz2 = ( z15, y15, x15, z14, y14, x14, z13, y13, x13, z12, y12, x12, z11, y11, x11, z10 )
const __m128i vxyz2 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(vtemp0), _mm_castsi128_ps(vtemp2), _MM_SHUFFLE(3, 1, 3, 1)));
o = (uint8_t*) ((uintptr_t) o + address_increment * 3);
_mm_storeu_si128((__m128i*) o, vxyz0);
_mm_storeu_si128((__m128i*) o + 1, vxyz1);
_mm_storeu_si128((__m128i*) o + 2, vxyz2);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
const uint8_t vz = *z++;
o[0] = vx;
o[1] = vy;
o[2] = vz;
o += 3;
} while (--n != 0);
}
}
| 8,127 | 57.898551 | 126 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x4-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x4_ukernel__neon(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
const uint8_t* z = (const uint8_t*) ((uintptr_t) y + n);
const uint8_t* w = (const uint8_t*) ((uintptr_t) z + n);
uint8_t* o = output;
if (n >= 8) {
do {
uint8x8x4_t vxyzw;
vxyzw.val[0] = vld1_u8(x); x += 8;
vxyzw.val[1] = vld1_u8(y); y += 8;
vxyzw.val[2] = vld1_u8(z); z += 8;
vxyzw.val[3] = vld1_u8(w); w += 8;
vst4_u8(o, vxyzw); o += 32;
n -= 8;
} while (n >= 8);
if (n != 0) {
const size_t address_increment = n - 8;
uint8x8x4_t vxyzw;
vxyzw.val[0] = vld1_u8((const uint8_t*) ((uintptr_t) x + address_increment));
vxyzw.val[1] = vld1_u8((const uint8_t*) ((uintptr_t) y + address_increment));
vxyzw.val[2] = vld1_u8((const uint8_t*) ((uintptr_t) z + address_increment));
vxyzw.val[3] = vld1_u8((const uint8_t*) ((uintptr_t) w + address_increment));
vst4_u8((uint8_t*) ((uintptr_t) o + address_increment * 4), vxyzw);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
const uint8_t vz = *z++;
const uint8_t vw = *w++;
o[0] = vx;
o[1] = vy;
o[2] = vz;
o[3] = vw;
o += 4;
} while (--n != 0);
}
}
| 1,665 | 27.724138 | 83 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x4-scalar.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x4_ukernel__scalar(
size_t n,
const uint8_t* input,
uint8_t* output)
{
assert(n != 0);
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
const uint8_t* z = (const uint8_t*) ((uintptr_t) y + n);
const uint8_t* w = (const uint8_t*) ((uintptr_t) z + n);
uint8_t* o = output;
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
const uint8_t vz = *z++;
const uint8_t vw = *w++;
o[0] = vx;
o[1] = vy;
o[2] = vz;
o[3] = vw;
o += 4;
n -= sizeof(uint8_t);
} while (n != 0);
}
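A hedged usage sketch for the scalar x4 kernel above (the buffer contents are invented for this document): four 4-byte streams are packed back-to-back in the input, and the kernel emits them interleaved.
#include <stdint.h>
#include <stdio.h>
#include <xnnpack/zip.h>
int main(void)
{
  // n = 4 bytes per stream; input layout is x[4] | y[4] | z[4] | w[4].
  uint8_t in[16] = {0, 1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33};
  uint8_t out[16];
  xnn_x8_zip_x4_ukernel__scalar(4, in, out);
  // Expected: 0 10 20 30 1 11 21 31 2 12 22 32 3 13 23 33
  for (int i = 0; i < 16; i++) {
    printf("%u ", out[i]);
  }
  printf("\n");
  return 0;
}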
| 804 | 20.184211 | 72 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-x4-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <emmintrin.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_x4_ukernel__sse2(
size_t n,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* x = input;
const uint8_t* y = (const uint8_t*) ((uintptr_t) x + n);
const uint8_t* z = (const uint8_t*) ((uintptr_t) y + n);
const uint8_t* w = (const uint8_t*) ((uintptr_t) z + n);
uint8_t* o = output;
if (n >= 16) {
do {
const __m128i vx = _mm_loadu_si128((const __m128i*) x);
x += 16;
const __m128i vy = _mm_loadu_si128((const __m128i*) y);
y += 16;
const __m128i vz = _mm_loadu_si128((const __m128i*) z);
z += 16;
const __m128i vw = _mm_loadu_si128((const __m128i*) w);
w += 16;
const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
const __m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
const __m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
const __m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
const __m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
_mm_storeu_si128((__m128i*) o, vxyzw0);
_mm_storeu_si128((__m128i*) o + 1, vxyzw1);
_mm_storeu_si128((__m128i*) o + 2, vxyzw2);
_mm_storeu_si128((__m128i*) o + 3, vxyzw3);
o = (void*) ((uintptr_t) o + 64);
n -= 16;
} while (n >= 16);
if (n != 0) {
const size_t address_increment = n - 16;
const __m128i vx = _mm_loadu_si128((const __m128i*) ((uintptr_t) x + address_increment));
const __m128i vy = _mm_loadu_si128((const __m128i*) ((uintptr_t) y + address_increment));
const __m128i vz = _mm_loadu_si128((const __m128i*) ((uintptr_t) z + address_increment));
const __m128i vw = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + address_increment));
const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
const __m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
const __m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
const __m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
const __m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
o = (void*) ((uintptr_t) o + address_increment * 4);
_mm_storeu_si128((__m128i*) o, vxyzw0);
_mm_storeu_si128((__m128i*) o + 1, vxyzw1);
_mm_storeu_si128((__m128i*) o + 2, vxyzw2);
_mm_storeu_si128((__m128i*) o + 3, vxyzw3);
}
} else {
do {
const uint8_t vx = *x++;
const uint8_t vy = *y++;
const uint8_t vz = *z++;
const uint8_t vw = *w++;
o[0] = vx;
o[1] = vy;
o[2] = vz;
o[3] = vw;
o += 4;
} while (--n != 0);
}
}
| 3,163 | 36.666667 | 95 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-xm-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <xnnpack/zip.h>
void xnn_x8_zip_xm_ukernel__neon(
size_t n,
size_t m,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* w = input;
const size_t input_increment = n * 3;
const size_t output_increment = 4 - m * n;
const uint8_t* last_input = w + n * (m - 1);
uint8_t* last_output = (uint8_t*) ((uintptr_t) output + (m - 4));
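  // The kernel transposes 4 input streams at a time: w is advanced to the
  // last stream of each 4-stream group (z, y, x are derived behind it), and
  // the output pointer steps by m per element. output_increment = 4 - m * n
  // rewinds the output by the m * n bytes written for the group and advances
  // it 4 bytes to the next group's column; the clamps to last_input and
  // last_output make the final group overlap (and harmlessly rewrite) earlier
  // work when m is not a multiple of 4.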
if (n >= 8) {
for (size_t i = 0; i < m; i += 4) {
size_t k = n;
w = (const uint8_t*) ((uintptr_t) w + input_increment);
if (w >= last_input) {
w = last_input;
}
const uint8_t* z = (const uint8_t*) ((uintptr_t) w - n);
const uint8_t* y = (const uint8_t*) ((uintptr_t) z - n);
const uint8_t* x = (const uint8_t*) ((uintptr_t) y - n);
while (k >= 8) {
const uint8x8_t vx = vld1_u8(x); x += 8;
const uint8x8_t vy = vld1_u8(y); y += 8;
const uint8x8_t vz = vld1_u8(z); z += 8;
const uint8x8_t vw = vld1_u8(w); w += 8;
const uint8x8x2_t vxy = vzip_u8(vx, vy);
const uint8x8x2_t vzw = vzip_u8(vz, vw);
const uint16x4x2_t vxyzw_lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.val[0]));
const uint16x4x2_t vxyzw_hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.val[1]));
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_lo.val[0]), 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_lo.val[0]), 1);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_lo.val[1]), 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_lo.val[1]), 1);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_hi.val[0]), 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_hi.val[0]), 1);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_hi.val[1]), 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vreinterpret_u32_u16(vxyzw_hi.val[1]), 1);
output = (uint8_t*) ((uintptr_t) output + m);
k -= 8;
}
if (k != 0) {
const size_t address_increment = k - 8;
x = (const uint8_t*) ((uintptr_t) x + address_increment);
y = (const uint8_t*) ((uintptr_t) y + address_increment);
z = (const uint8_t*) ((uintptr_t) z + address_increment);
w = (const uint8_t*) ((uintptr_t) w + address_increment);
const int64x1_t vshift = vmov_n_s64(8 * address_increment);
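        // 8 * address_increment is negative here (k < 8): vshl_u64 with a
        // negative count performs a logical right shift, discarding the
        // (8 - k) stale low bytes that the rewound loads below pick up and
        // leaving the k fresh bytes in the low lanes.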
const uint64x1_t vx = vshl_u64(vreinterpret_u64_u8(vld1_u8(x)), vshift);
const uint64x1_t vy = vshl_u64(vreinterpret_u64_u8(vld1_u8(y)), vshift);
const uint64x1_t vz = vshl_u64(vreinterpret_u64_u8(vld1_u8(z)), vshift);
const uint64x1_t vw = vshl_u64(vreinterpret_u64_u8(vld1_u8(w)), vshift); w += 8;
const uint8x8x2_t vxy = vzip_u8(vreinterpret_u8_u64(vx), vreinterpret_u8_u64(vy));
const uint8x8x2_t vzw = vzip_u8(vreinterpret_u8_u64(vz), vreinterpret_u8_u64(vw));
const uint16x4x2_t vxyzw_lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.val[0]));
const uint16x4x2_t vxyzw_hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.val[1]));
uint32x2_t vxyzw0 = vreinterpret_u32_u16(vxyzw_lo.val[0]);
uint32x2_t vxyzw1 = vreinterpret_u32_u16(vxyzw_lo.val[1]);
uint32x2_t vxyzw2 = vreinterpret_u32_u16(vxyzw_hi.val[0]);
uint32x2_t vxyzw3 = vreinterpret_u32_u16(vxyzw_hi.val[1]);
if (k & 4) {
vst1_lane_u32((void*) output, vxyzw0, 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vxyzw0, 1);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vxyzw1, 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vxyzw1, 1);
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = vxyzw2;
vxyzw1 = vxyzw3;
}
if (k & 2) {
vst1_lane_u32((void*) output, vxyzw0, 0);
output = (uint8_t*) ((uintptr_t) output + m);
vst1_lane_u32((void*) output, vxyzw0, 1);
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = vxyzw1;
}
if (k & 1) {
vst1_lane_u32((void*) output, vxyzw0, 0);
output = (uint8_t*) ((uintptr_t) output + m);
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
if (output > last_output) {
output = last_output;
}
}
} else {
const uint8_t* i = input;
uint8_t* o = output;
size_t k = n;
do {
size_t l = m;
const uint8_t* ii = i++;
do {
*o++ = *ii;
ii += n;
} while (--l != 0);
} while (--k != 0);
}
}
| 5,432 | 36.468966 | 113 | c | XNNPACK | XNNPACK-master/src/x8-zip/x8-zip-xm-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <emmintrin.h>
#include <xnnpack/zip.h>
#include <xnnpack/unaligned.h>
void xnn_x8_zip_xm_ukernel__sse2(
size_t n,
size_t m,
const uint8_t* input,
uint8_t* output)
{
const uint8_t* w = input;
const size_t input_increment = n * 3;
const size_t output_increment = 4 - m * n;
const uint8_t* last_input = w + n * (m - 1);
uint8_t* last_output = (uint8_t*) ((uintptr_t) output + (m - 4));
if (n >= 8) {
for (size_t i = 0; i < m; i += 4) {
size_t k = n;
w = (const uint8_t*) ((uintptr_t) w + input_increment);
if (w >= last_input) {
w = last_input;
}
const uint8_t* z = (const uint8_t*) ((uintptr_t) w - n);
const uint8_t* y = (const uint8_t*) ((uintptr_t) z - n);
const uint8_t* x = (const uint8_t*) ((uintptr_t) y - n);
while (k >= 16) {
const __m128i vx = _mm_loadu_si128((const __m128i*) x);
x += 16;
const __m128i vy = _mm_loadu_si128((const __m128i*) y);
y += 16;
const __m128i vz = _mm_loadu_si128((const __m128i*) z);
z += 16;
const __m128i vw = _mm_loadu_si128((const __m128i*) w);
w += 16;
const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
__m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
__m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
__m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
__m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_unpackhi_epi64(vxyzw1, vxyzw1);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw2));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw2 = _mm_shufflelo_epi16(vxyzw2, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw2));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw2 = _mm_unpackhi_epi64(vxyzw2, vxyzw2);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw2));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw2 = _mm_shufflelo_epi16(vxyzw2, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw2));
output = (uint8_t*) ((uintptr_t) output + m);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw3));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw3 = _mm_shufflelo_epi16(vxyzw3, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw3));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw3 = _mm_unpackhi_epi64(vxyzw3, vxyzw3);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw3));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw3 = _mm_shufflelo_epi16(vxyzw3, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw3));
output = (uint8_t*) ((uintptr_t) output + m);
k -= 16;
      }
if (k >= 8) {
const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
x += 8;
const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
y += 8;
const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
z += 8;
const __m128i vw = _mm_loadl_epi64((const __m128i*) w);
w += 8;
const __m128i vxy = _mm_unpacklo_epi8(vx, vy);
const __m128i vzw = _mm_unpacklo_epi8(vz, vw);
__m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);
__m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_unpackhi_epi64(vxyzw1, vxyzw1);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw1));
output = (uint8_t*) ((uintptr_t) output + m);
k -= 8;
}
if (k != 0) {
const size_t address_decrement = 8 - k;
x -= address_decrement;
y -= address_decrement;
z -= address_decrement;
w -= address_decrement;
const __m128i vshift = _mm_cvtsi32_si128((int) address_decrement * 8);
const __m128i vx = _mm_srl_epi64(_mm_loadl_epi64((const __m128i*) x), vshift);
const __m128i vy = _mm_srl_epi64(_mm_loadl_epi64((const __m128i*) y), vshift);
const __m128i vz = _mm_srl_epi64(_mm_loadl_epi64((const __m128i*) z), vshift);
const __m128i vw = _mm_srl_epi64(_mm_loadl_epi64((const __m128i*) w), vshift);
w += 8;
const __m128i vxy = _mm_unpacklo_epi8(vx, vy);
const __m128i vzw = _mm_unpacklo_epi8(vz, vw);
__m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);
__m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);
if (k & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = vxyzw1;
}
if (k & 2) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
}
if (k & 1) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vxyzw0));
output = (uint8_t*) ((uintptr_t) output + m);
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
if (output > last_output) {
output = last_output;
}
}
} else {
const uint8_t* i = input;
uint8_t* o = output;
size_t k = n;
do {
size_t l = m;
const uint8_t* ii = i++;
do {
*o++ = *ii;
ii += n;
} while (--l != 0);
} while (--k != 0);
}
}
| 9,403 | 44.211538 | 86 | c | XNNPACK | XNNPACK-master/src/xnnpack/aarch32-assembler.h |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <xnnpack/assembler.h>
// MSVC defines these tokens using macros, causing name collisions. We use these names in our assembler, so undef them.
// These macros are pulled in via xnnpack/math.h -> intrin.h -> arm_neon.h.
#include <xnnpack/math.h>
#ifdef vabs_f32
#undef vabs_f32
#endif
#ifdef vadd_f32
#undef vadd_f32
#endif
#ifdef vcvt_f32_s32
#undef vcvt_f32_s32
#endif
#ifdef vcvt_s32_f32
#undef vcvt_s32_f32
#endif
#ifdef vcvtn_s32_f32
#undef vcvtn_s32_f32
#endif
#ifdef vmax_f32
#undef vmax_f32
#endif
#ifdef vmax_s8
#undef vmax_s8
#endif
#ifdef vmin_f32
#undef vmin_f32
#endif
#ifdef vmin_s8
#undef vmin_s8
#endif
#ifdef vmla_f32
#undef vmla_f32
#endif
#ifdef vmlal_s16
#undef vmlal_s16
#endif
#ifdef vmovl_s8
#undef vmovl_s8
#endif
#ifdef vmul_f32
#undef vmul_f32
#endif
#ifdef vneg_f32
#undef vneg_f32
#endif
#ifdef vqadd_s16
#undef vqadd_s16
#endif
#ifdef vqdmulh_s32
#undef vqdmulh_s32
#endif
#ifdef vqmovn_s16
#undef vqmovn_s16
#endif
#ifdef vqmovn_s32
#undef vqmovn_s32
#endif
#ifdef vqshl_s32
#undef vqshl_s32
#endif
#ifdef vrshl_s32
#undef vrshl_s32
#endif
namespace xnnpack {
namespace aarch32 {
// Special values used to check that callee-saved registers are properly saved.
// Low 8 bits should be 0 to encode register code.
constexpr uint32_t kRRegisterCorruptValue = UINT32_C(0xDEADBE00);
constexpr uint32_t kSRegisterCorruptValue = UINT32_C(0x7FF00000);
constexpr uint8_t kRegisterCorruptMask = UINT8_C(0xFF);
// Instruction used to align code, is a nop.
constexpr uint32_t kAlignInstruction = 0xE320F000;
enum class SpecialFPRegister {
kFPSCR = 1,
};
constexpr SpecialFPRegister FPSCR = SpecialFPRegister::kFPSCR;
struct CoreRegister {
uint8_t code;
};
constexpr CoreRegister r0{0};
constexpr CoreRegister r1{1};
constexpr CoreRegister r2{2};
constexpr CoreRegister r3{3};
constexpr CoreRegister r4{4};
constexpr CoreRegister r5{5};
constexpr CoreRegister r6{6};
constexpr CoreRegister r7{7};
constexpr CoreRegister r8{8};
constexpr CoreRegister r9{9};
constexpr CoreRegister r10{10};
constexpr CoreRegister r11{11};
constexpr CoreRegister r12{12};
constexpr CoreRegister r13{13};
constexpr CoreRegister r14{14};
constexpr CoreRegister r15{15};
constexpr CoreRegister sp = r13;
constexpr CoreRegister lr = r14;
constexpr CoreRegister pc = r15;
constexpr CoreRegister APSR_nzcv = r15;
static inline bool operator==(const CoreRegister lhs, const CoreRegister rhs) {
return lhs.code == rhs.code;
}
struct CoreRegisterList {
CoreRegisterList(std::initializer_list<CoreRegister> rs) {
for (auto r : rs) {
list |= 1 << r.code;
}
}
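  // list & (list - 1) clears the lowest set bit, so a nonzero result means at
  // least two registers are in the list.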
bool has_more_than_one_register() { return (list & (list - 1)) != 0; }
// Bit i is set if CoreRegister is in the list.
uint16_t list = 0;
};
static inline bool operator==(int i, CoreRegisterList registers) {
return i == registers.list;
}
struct SRegister {
uint8_t code;
uint8_t d() const { return code & 0x1; }
uint8_t vd() const { return (code & 0x1e) >> 1; }
};
static inline bool operator==(const SRegister lhs, const SRegister rhs) {
return lhs.code == rhs.code;
}
constexpr SRegister s0{0};
constexpr SRegister s1{1};
constexpr SRegister s2{2};
constexpr SRegister s3{3};
constexpr SRegister s4{4};
constexpr SRegister s5{5};
constexpr SRegister s6{6};
constexpr SRegister s7{7};
constexpr SRegister s8{8};
constexpr SRegister s9{9};
constexpr SRegister s10{10};
constexpr SRegister s11{11};
constexpr SRegister s12{12};
constexpr SRegister s13{13};
constexpr SRegister s14{14};
constexpr SRegister s15{15};
constexpr SRegister s16{16};
constexpr SRegister s17{17};
constexpr SRegister s18{18};
constexpr SRegister s19{19};
constexpr SRegister s20{20};
constexpr SRegister s21{21};
constexpr SRegister s22{22};
constexpr SRegister s23{23};
constexpr SRegister s24{24};
constexpr SRegister s25{25};
constexpr SRegister s26{26};
constexpr SRegister s27{27};
constexpr SRegister s28{28};
constexpr SRegister s29{29};
constexpr SRegister s30{30};
constexpr SRegister s31{31};
// Define DRegisterLane before DRegister so that we can have the operator[] overloading for nice syntax.
struct DRegisterLane {
uint8_t code;
uint8_t lane;
uint8_t d() const { return (code & 0x10) >> 4; }
uint8_t vd() const { return code & 0xf; }
};
static inline bool operator==(const DRegisterLane lhs, const DRegisterLane rhs) {
return lhs.code == rhs.code && lhs.lane == rhs.lane;
}
struct DRegister {
uint8_t code;
uint8_t d() const { return (code & 0x10) >> 4; }
uint8_t vd() const { return code & 0xf; }
SRegister low() const { return SRegister{uint8_t(code * 2)}; }
SRegister high() const { return SRegister{uint8_t(code * 2 + 1)}; }
DRegisterLane operator[](std::size_t pos) const {
return DRegisterLane{code, static_cast<uint8_t>(pos)};
}
};
static inline bool operator==(const DRegister lhs, const DRegister rhs) {
return lhs.code == rhs.code;
}
constexpr DRegister d0{0};
constexpr DRegister d1{1};
constexpr DRegister d2{2};
constexpr DRegister d3{3};
constexpr DRegister d4{4};
constexpr DRegister d5{5};
constexpr DRegister d6{6};
constexpr DRegister d7{7};
constexpr DRegister d8{8};
constexpr DRegister d9{9};
constexpr DRegister d10{10};
constexpr DRegister d11{11};
constexpr DRegister d12{12};
constexpr DRegister d13{13};
constexpr DRegister d14{14};
constexpr DRegister d15{15};
constexpr DRegister d16{16};
constexpr DRegister d17{17};
constexpr DRegister d18{18};
constexpr DRegister d19{19};
constexpr DRegister d20{20};
constexpr DRegister d21{21};
constexpr DRegister d22{22};
constexpr DRegister d23{23};
constexpr DRegister d24{24};
constexpr DRegister d25{25};
constexpr DRegister d26{26};
constexpr DRegister d27{27};
constexpr DRegister d28{28};
constexpr DRegister d29{29};
constexpr DRegister d30{30};
constexpr DRegister d31{31};
struct QRegister {
uint8_t code;
  // d() and vd() together encode the equivalent D register index (code * 2).
uint8_t d() const { return (code & 0x8) >> 3; }
uint8_t vd() const { return (code & 0x7) << 1; }
DRegister low() const { return DRegister{uint8_t(code * 2)}; }
DRegister high() const { return DRegister{uint8_t(code * 2 + 1)}; }
};
static inline bool operator==(const QRegister lhs, const QRegister rhs) {
return lhs.code == rhs.code;
}
constexpr QRegister q0{0};
constexpr QRegister q1{1};
constexpr QRegister q2{2};
constexpr QRegister q3{3};
constexpr QRegister q4{4};
constexpr QRegister q5{5};
constexpr QRegister q6{6};
constexpr QRegister q7{7};
constexpr QRegister q8{8};
constexpr QRegister q9{9};
constexpr QRegister q10{10};
constexpr QRegister q11{11};
constexpr QRegister q12{12};
constexpr QRegister q13{13};
constexpr QRegister q14{14};
constexpr QRegister q15{15};
// SIMD register lists are used more restrictively than core register lists:
// only consecutive registers can be used as an instruction operand.
template <typename RegType>
struct ConsecutiveRegisterList {
// End must be >= start.
ConsecutiveRegisterList(RegType s, RegType end)
: start(s),
length(end.code - s.code + 1) {}
explicit ConsecutiveRegisterList(RegType s, int len)
: start(s),
length(len) {}
// NOLINTNEXTLINE(google-explicit-constructor)
ConsecutiveRegisterList(RegType start)
: ConsecutiveRegisterList(start, start) {}
RegType start;
uint8_t length;
};
// Specific struct for VLD2 and VLD3 register list operand.
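// The registers may be consecutive ({d0, d1}) or double-spaced ({d0, d2});
// double_spaced is inferred from the register codes in the constructors below.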
struct VLoadStoreRegList {
VLoadStoreRegList(DRegister reg1, DRegister reg2)
: reg1(reg1), reg2(reg2) {
if (reg1.code == reg2.code - 2) {
double_spaced = true;
} else {
double_spaced = false;
}
}
VLoadStoreRegList(DRegister reg1, DRegister reg2, DRegister reg3)
: reg1(reg1), reg2(reg2), reg3(reg3) {
if (reg1.code == reg2.code - 2) {
double_spaced = true;
} else {
double_spaced = false;
}
}
DRegister reg1;
DRegister reg2;
DRegister reg3;
bool double_spaced;
};
using SRegisterList = ConsecutiveRegisterList<SRegister>;
using DRegisterList = ConsecutiveRegisterList<DRegister>;
static inline SRegisterList operator-(const SRegister lhs, const SRegister rhs) {
return SRegisterList(lhs, rhs);
}
static inline DRegisterList operator-(const DRegister lhs, const DRegister rhs) {
return DRegisterList(lhs, rhs);
}
struct QRegisterList {
// NOLINTNEXTLINE(google-explicit-constructor)
QRegisterList(QRegister s) : start(s), length(1) {}
QRegisterList(QRegister s, QRegister end) : start(s), length(end.code - s.code + 1) {}
// Explicit conversion to DRegisterList.
explicit operator DRegisterList() const {
return DRegisterList({static_cast<uint8_t>(start.code * 2)}, length * 2);
}
QRegister start;
uint8_t length;
};
static inline QRegisterList operator-(const QRegister lhs, const QRegister rhs) {
return QRegisterList(lhs, rhs);
}
// A8.5 Addressing modes for memory access.
enum class AddressingMode {
// [<Rn>, <offset>], offset applied to address in Rn.
kOffset,
// Pre-indexed not used, so not implemented.
// [<Rn>], <offset>, address from Rn, offset applied, written back to Rn.
kPostIndexed,
};
// Memory operands, operands for memory access instructions. See
// "MemOperandHelper mem" for a nicer syntax that is closer to assembly.
class MemOperand {
public:
MemOperand(CoreRegister rn, int32_t offset)
: mode_(AddressingMode::kOffset),
rn_(rn),
offset_(offset) {}
MemOperand(CoreRegister rn, int32_t offset, AddressingMode mode)
: mode_(mode),
rn_(rn),
offset_(offset) {}
CoreRegister base() const { return rn_; }
int32_t offset() const { return offset_; }
AddressingMode mode() const { return mode_; }
// These are bits used for encoding, named based on the encoding description.
int32_t u() { return static_cast<int32_t>(offset_ >= 0); }
int32_t p() { return static_cast<int32_t>(mode_ != AddressingMode::kPostIndexed); }
// Note, kPostIndexed will write back, but doesn't need to set bit w.
int32_t w() { return 0; }
// Overload postfix increment to indicate a post-indexed addressing mode for load/stores.
MemOperand operator++(int) {
mode_ = AddressingMode::kPostIndexed;
return *this;
}
private:
AddressingMode mode_;
CoreRegister rn_;
int32_t offset_;
};
static inline bool operator==(const MemOperand lhs, const MemOperand rhs) {
return lhs.mode() == rhs.mode() && lhs.base() == rhs.base() && lhs.offset() == rhs.offset();
}
static inline MemOperand operator,(CoreRegister r, int32_t offset) {
return MemOperand(r, offset);
}
// Helper struct for some syntax sugar to look like native assembly, see mem.
struct MemOperandHelper {
MemOperand operator[](MemOperand op) const { return op; }
MemOperand operator[](CoreRegister r) const { return MemOperand(r, 0); }
};
// Use "mem" (and its overload of array subscript operator) to get some syntax
// that looks closer to native assembly when accessing memory. For example:
// - ldr(r0, mem[rn, offset]); // offset
// - ldr(r0, mem[rn], offset); // post-indexed
constexpr MemOperandHelper mem;
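// A hedged usage sketch (these driver lines are invented for this document,
// not part of the header): given an Assembler `a`, the mem helper reads like
// native assembly:
//   a.ldr(r0, mem[r1, 8]);  // offset:       r0 = *(r1 + 8)
//   a.ldr(r0, mem[r1], 4);  // post-indexed: r0 = *r1, then r1 += 4
//   a.str(r0, mem[r2]);     // *r2 = r0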
// Condition codes for conditional execution, encoded in instruction bits 31:28.
enum Condition : uint32_t {
kEQ = 0x00000000,
kNE = 0x10000000,
kCS = 0x20000000,
kCC = 0x30000000,
kMI = 0x40000000,
kPL = 0x50000000,
kVS = 0x60000000,
kVC = 0x70000000,
kHI = 0x80000000,
kLS = 0x90000000,
kGE = 0xa0000000,
kLT = 0xB0000000,
kGT = 0xC0000000,
kLE = 0xD0000000,
kAL = 0xE0000000,
kHS = kCS,
kLO = kCC,
};
enum DataSize {
k8 = 0,
k16 = 1,
k32 = 2,
};
// A simple AAarch32 assembler.
class Assembler : public AssemblerBase {
public:
using AssemblerBase::AssemblerBase;
void add(CoreRegister rn, CoreRegister rm) { add(rn, rn, rm); }
void add(CoreRegister rd, CoreRegister rn, CoreRegister rm);
  // Only uint8_t immediates are supported for now; this simplifies the encoding.
void add(CoreRegister rd, CoreRegister rn, uint8_t imm);
void adds(CoreRegister rd, CoreRegister rn, uint8_t imm);
void and_(CoreRegister rd, CoreRegister rn, uint8_t imm);
void b(Label& l) { b(kAL, l); }
void beq(Label& l) { b(kEQ, l); }
void bne(Label& l) { b(kNE, l); }
void bhi(Label& l) { b(kHI, l); }
void bhs(Label& l) { b(kHS, l); }
void blo(Label& l) { b(kLO, l); }
void blx(CoreRegister rm);
void bic(CoreRegister rd, CoreRegister rn, uint8_t imm);
void bx(CoreRegister rm);
  // Cmp supports a subset of uint32_t immediates, see "A5.2.4 Modified immediate
  // constants in ARM instructions"; for simplicity we start with uint8_t, which
  // is fully representable using a "rotation" of 0.
void cmp(CoreRegister rn, uint8_t imm);
void cmp(CoreRegister rn, CoreRegister rm);
void ldr(CoreRegister rt, MemOperand operand, int32_t offset);
void ldr(CoreRegister rt, MemOperand operand);
// LDRD <Rt>, <Rt2>, [<Rn>{, #+/-<imm>}].
void ldrd(CoreRegister rt, CoreRegister rt2, MemOperand op);
void mov(CoreRegister rd, CoreRegister rm);
void mov(CoreRegister rd, uint16_t imm);
void movt(CoreRegister rd, uint16_t imm);
void moveq(CoreRegister rd, CoreRegister rm) { mov(kEQ, rd, rm); }
void movlo(CoreRegister rd, CoreRegister rm) { mov(kLO, rd, rm); }
void movls(CoreRegister rd, CoreRegister rm) { mov(kLS, rd, rm); }
void nop();
void pld(MemOperand operand);
void pop(CoreRegisterList regs);
void push(CoreRegisterList regs);
void str(CoreRegister rt, MemOperand op);
void sub(CoreRegister rd, CoreRegister rn, uint8_t imm);
void sub(CoreRegister rd, CoreRegister rn, CoreRegister rm);
  // Only uint8_t immediates are supported for now; this simplifies the encoding.
void subs(CoreRegister rd, CoreRegister rn, uint8_t imm);
void tst(CoreRegister rn, uint8_t imm);
// SIMD instructions.
void vabs_f32(QRegister qd, QRegister qm);
void vadd_f32(QRegister qd, QRegister qn, QRegister qm);
void vcmpe_f32(SRegister sd, SRegister sm);
void vcvt_f32_s32(QRegister qd, QRegister qm);
void vcvt_s32_f32(QRegister qd, QRegister qm);
void vcvtn_s32_f32(QRegister qd, QRegister qm);
void vdup_8(QRegister qd, DRegisterLane dm) { vdup(k8, qd, dm); }
void vdup_16(QRegister qd, DRegisterLane dm) { vdup(k16, qd, dm); }
void vdup_32(QRegister qd, DRegisterLane dm) { vdup(k32, qd, dm); }
void vext_8(QRegister qd, QRegister qn, QRegister qm, uint8_t imm4);
// VLD1.8 <list>, [<Rn>]{!} (multiple single elements).
void vld1_8(DRegisterList regs, MemOperand op) { vld1(k8, regs, op); }
void vld1_8(DRegisterList regs, MemOperand op, CoreRegister rm) { vld1(k8, regs, op, rm); }
void vld1_8(QRegisterList regs, MemOperand op) { vld1(k8, static_cast<DRegisterList>(regs), op); }
// VLD1.32 <list>, [<Rn>]{!} (multiple single elements).
void vld1_32(DRegisterList regs, MemOperand op) { vld1(k32, regs, op); }
void vld1_32(QRegisterList regs, MemOperand op) { vld1(k32, static_cast<DRegisterList>(regs), op); }
// VLD1.32 <list>, [<Rn>]{!} (single element to one lane).
void vld1_32(DRegisterLane dd, MemOperand op);
// VLD1.32 <list>, [<Rn>]{!} (single element to all lanes).
// We cannot differentiate the register list in C++ syntax, so use an instruction name similar to AArch64 LD1R.
void vld1r_32(DRegisterList regs, MemOperand op);
void vld2r_32(VLoadStoreRegList regs, MemOperand op);
void vld3r_32(VLoadStoreRegList regs, MemOperand op);
// VLDM <Rn>{!}, <list> (IA).
void vldm(MemOperand rn, SRegisterList regs);
void vldm(MemOperand rn, DRegisterList regs);
void vldr(SRegister sd, MemOperand op);
void vldr(DRegister dd, MemOperand op);
void vmax_f32(QRegister qd, QRegister qn, QRegister qm);
void vmax_s8(QRegister qd, QRegister qn, QRegister qm);
void vmin_f32(QRegister qd, QRegister qn, QRegister qm);
void vmin_s8(QRegister qd, QRegister qn, QRegister qm);
// VMLA.F32 <Sd>, <Sn>, <Sm>
void vmla_f32(SRegister sd, SRegister sn, SRegister sm);
// VMLA.F32 <Qd>, <Qn>, <Dm[x]>
void vmla_f32(QRegister qd, QRegister qn, DRegisterLane dm);
// VMLAL.S16 <Qd>, <Dn>, <Dm[x]>
void vmlal_s16(QRegister qd, DRegister dn, DRegisterLane dm);
// VMOV.I32 <Qd>, #<imm>; encoding A1
void vmov_i32(QRegister qd, uint8_t imm);
// VMOV.F32 <Qd>, #<imm>; encoding A1
void vmov(QRegister qd, uint8_t imm);
// VMOV <Rt>, <Sn>; encoding A1.
void vmov(CoreRegister rt, SRegister sn);
// VMOV <Sn>, <Rt>; encoding A1.
void vmov(SRegister sn, CoreRegister rt);
// VMOV.F32 <Sd>, <Sm>; encoding A2.
void vmov(SRegister sd, SRegister sm);
// VMOV <Dm>, <Rt>, <Rt2>; encoding A1.
void vmov(DRegister dm, CoreRegister rt, CoreRegister rt2);
// VMOV <Rt>, <Rt2>, <Dm>; encoding A1.
void vmov(CoreRegister rt, CoreRegister rt2, DRegister dm);
// VMOV <Dd>, <Dm>; encoding A1.
void vmov(DRegister dd, DRegister dm);
// VMOV <Qd>, <Qm>; encoding A1.
void vmov(QRegister qd, QRegister qm);
// VMOV_F32 <Sd>, <Sm>
void vmov_f32(SRegister sd, SRegister sm) { vmov_f32(kAL, sd, sm); }
void vmovpl_f32(SRegister sd, SRegister sm) { vmov_f32(kPL, sd, sm); }
void vmovmi_f32(SRegister sd, SRegister sm) { vmov_f32(kMI, sd, sm); }
// VMOV_F64 <Dd>, <Dm>
void vmov_f64(DRegister dd, DRegister dm);
// VMOVL.S8 <Qd>, <Dm>
void vmovl_s8(QRegister qd, DRegister dm);
void vmrs(CoreRegister rt, SpecialFPRegister spec_reg);
void vmul_f32(QRegister qd, QRegister qn, QRegister qm);
// VMUL.F32 <Qd>, <Qn>, <Dm[x]>
void vmul_f32(QRegister qd, QRegister qn, DRegisterLane dm);
void vneg_f32(QRegister qd, QRegister qm);
void vpop(DRegisterList regs);
void vpush(DRegisterList regs);
void vpush(SRegisterList regs);
void vqadd_s16(QRegister qd, QRegister qn, QRegister qm);
void vqdmulh_s32(QRegister qd, QRegister qn, DRegisterLane dm);
void vqmovn_s16(DRegister dd, QRegister qm);
void vqmovn_s32(DRegister dd, QRegister qm);
void vqshl_s32(QRegister qd, QRegister qm, QRegister qn);
void vrshl_s32(QRegister qd, QRegister qm, QRegister qn);
void vsdot_s8(QRegister qd, QRegister qn, DRegisterLane dm);
// VST1.8 <list>, [<Rn>]{!} (multiple single elements).
void vst1_8(DRegisterList regs, MemOperand op) { vst1(k8, regs, op); }
// VST1.8 <list>, [<Rn>]{!}, <Rm> (multiple single elements).
void vst1_8(DRegisterList regs, MemOperand op, CoreRegister rm) { vst1(k8, regs, op, rm); }
// VST1.8 <list>, [<Rn>]{!} (single element from one lane).
void vst1_8(DRegisterLane dd, MemOperand op) { vst1(k8, dd, op); }
// VST1.16 <list>, [<Rn>]{!} (multiple single elements).
void vst1_16(DRegisterList regs, MemOperand op) { vst1(k16, regs, op); }
// VST1.16 <list>, [<Rn>]{!}, <Rm> (multiple single elements).
void vst1_16(DRegisterList regs, MemOperand op, CoreRegister rm) { vst1(k16, regs, op, rm); }
// VST1.16 <list>, [<Rn>]{!} (single element from one lane).
void vst1_16(DRegisterLane dd, MemOperand op) { vst1(k16, dd, op); }
// VST1.32 <list>, [<Rn>]{!} (multiple single elements).
void vst1_32(DRegisterList regs, MemOperand op) { vst1(k32, regs, op); }
// VST1.32 <list>, [<Rn>]{!}, <Rm> (multiple single elements).
void vst1_32(DRegisterList regs, MemOperand op, CoreRegister rm) { vst1(k32, regs, op, rm); }
// VST1.32 <list>, [<Rn>]{!} (single element from one lane).
void vst1_32(DRegisterLane dd, MemOperand op) { vst1(k32, dd, op); }
// VSTM <Rn>{!}, <list>, consecutive 64-bit registers.
void vstm(MemOperand rn, DRegisterList regs);
// VSTR <Sd>, [Rn{, #+/-<imm>}]; stores a single extension register to memory.
void vstr(SRegister sd, MemOperand op);
// Binds Label l to the current location in the code buffer.
void bind(Label& l);
// Aligns the cursor to the specified number of bytes; `n` must be a power of 2.
void align(uint8_t n);
private:
void mov(Condition c, CoreRegister rd, CoreRegister rm);
void b(Condition c, Label& l);
void vdup(DataSize size, QRegister qd, DRegisterLane dm);
void vmov_f32(Condition c, SRegister sd, SRegister sm);
void vld1(DataSize size, DRegisterList regs, MemOperand op);
void vld1(DataSize size, DRegisterList regs, MemOperand op, CoreRegister rm);
void vst1(DataSize size, DRegisterList regs, MemOperand op);
void vst1(DataSize size, DRegisterList regs, MemOperand op, CoreRegister rm);
void vst1(DataSize size, DRegisterLane dd, MemOperand op);
};
class MacroAssembler : public Assembler {
using Assembler::Assembler;
public:
void f32_hardswish(QRegister sixth, QRegister three, QRegister six,
QRegister zero, const QRegister *accs, size_t num_accs,
const QRegister *tmps, size_t num_tmps);
void Mov(CoreRegister rd, uint32_t imm);
};
class TrampolineGenerator : public MacroAssembler {
using MacroAssembler::MacroAssembler;
public:
void generate(size_t args_on_stack);
private:
// Helper functions to check that registers match. We keep the expected value inside of r0 and return early once we
// have a mismatch. r0 then becomes the error code; if it is 0, there are no errors.
void CheckRegisterMatch(DRegister actual, Label& exit);
void CheckRegisterMatch(CoreRegister actual, Label& exit);
};
} // namespace aarch32
} // namespace xnnpack
| 21,299
| 32.702532
| 118
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/aarch64-assembler.h
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cstddef>
#include <cstdint>
#include <xnnpack/assembler.h>
#include <xnnpack/microparams.h>
namespace xnnpack {
namespace aarch64 {
constexpr size_t kInstructionSizeInBytesLog2 = 2;
// Special values used to check that callee-saved registers are properly saved.
// Low 8 bits should be 0 to encode register code.
constexpr uint64_t kXRegisterCorruptValue = UINT64_C(0xDEADBEEF12345600);
constexpr uint64_t kVRegisterCorruptValue = UINT64_C(0x7FF000007F801000);
constexpr uint8_t kRegisterCorruptMask = UINT8_C(0xFF);
// Instruction used to align code; it is an HLT.
constexpr uint32_t kAlignInstruction = 0xD4400000;
struct WRegister {
uint8_t code;
};
constexpr WRegister w0{0};
constexpr WRegister w1{1};
constexpr WRegister w2{2};
constexpr WRegister w3{3};
constexpr WRegister w4{4};
constexpr WRegister w5{5};
constexpr WRegister w6{6};
constexpr WRegister w7{7};
constexpr WRegister w8{8};
constexpr WRegister w9{9};
constexpr WRegister w10{10};
constexpr WRegister w11{11};
constexpr WRegister w12{12};
constexpr WRegister w13{13};
constexpr WRegister w14{14};
constexpr WRegister w15{15};
constexpr WRegister w16{16};
constexpr WRegister w17{17};
constexpr WRegister w18{18};
constexpr WRegister w19{19};
constexpr WRegister w20{20};
constexpr WRegister w21{21};
constexpr WRegister w22{22};
constexpr WRegister w23{23};
constexpr WRegister w24{24};
constexpr WRegister w25{25};
constexpr WRegister w26{26};
constexpr WRegister w27{27};
constexpr WRegister w28{28};
constexpr WRegister w29{29};
constexpr WRegister w30{30};
struct XRegister {
uint8_t code;
};
constexpr XRegister x0{0};
constexpr XRegister x1{1};
constexpr XRegister x2{2};
constexpr XRegister x3{3};
constexpr XRegister x4{4};
constexpr XRegister x5{5};
constexpr XRegister x6{6};
constexpr XRegister x7{7};
constexpr XRegister x8{8};
constexpr XRegister x9{9};
constexpr XRegister x10{10};
constexpr XRegister x11{11};
constexpr XRegister x12{12};
constexpr XRegister x13{13};
constexpr XRegister x14{14};
constexpr XRegister x15{15};
constexpr XRegister x16{16};
constexpr XRegister x17{17};
constexpr XRegister x18{18};
constexpr XRegister x19{19};
constexpr XRegister x20{20};
constexpr XRegister x21{21};
constexpr XRegister x22{22};
constexpr XRegister x23{23};
constexpr XRegister x24{24};
constexpr XRegister x25{25};
constexpr XRegister x26{26};
constexpr XRegister x27{27};
constexpr XRegister x28{28};
constexpr XRegister x29{29};
constexpr XRegister x30{30};
constexpr XRegister xzr{31};
constexpr XRegister sp{31};
struct VRegisterLane {
uint8_t code;
uint8_t size;
uint8_t lane;
bool is_h() const { return size == 1; };
bool is_s() const { return size == 2; };
};
struct ScalarVRegister{
uint8_t code;
uint8_t size;
VRegisterLane operator[](std::size_t pos) const {
return VRegisterLane{code, size, static_cast<uint8_t>(pos)};
}
};
struct VRegister {
uint8_t code;
uint8_t size;
uint8_t q;
VRegister v8b() const { return {code, 0, 0}; }
VRegister v16b() const { return {code, 0, 1}; }
VRegister v4h() const { return {code, 1, 0}; }
VRegister v8h() const { return {code, 1, 1}; }
VRegister v2s() const { return {code, 2, 0}; }
VRegister v4s() const { return {code, 2, 1}; }
VRegister v1d() const { return {code, 3, 0}; }
VRegister v2d() const { return {code, 3, 1}; }
ScalarVRegister h() const { return {code, 1}; }
ScalarVRegister s() const { return {code, 2}; }
ScalarVRegister d() const { return {code, 3}; }
bool is_h() const { return size == 1; };
bool is_s() const { return size == 2; };
};
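// A minimal usage sketch of the shape and lane accessors above (illustrative only):
// v0.v4s() views register 0 as four 32-bit lanes, v0.v16b() as sixteen 8-bit lanes, and
// v0.s()[1] selects 32-bit lane 1 as a VRegisterLane, so a lane-indexed FMLA (declared in
// the Assembler class below) can be written as:
//   fmla(v16.v4s(), v20.v4s(), v0.s()[1]);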
constexpr VRegister v0{0};
constexpr VRegister v1{1};
constexpr VRegister v2{2};
constexpr VRegister v3{3};
constexpr VRegister v4{4};
constexpr VRegister v5{5};
constexpr VRegister v6{6};
constexpr VRegister v7{7};
constexpr VRegister v8{8};
constexpr VRegister v9{9};
constexpr VRegister v10{10};
constexpr VRegister v11{11};
constexpr VRegister v12{12};
constexpr VRegister v13{13};
constexpr VRegister v14{14};
constexpr VRegister v15{15};
constexpr VRegister v16{16};
constexpr VRegister v17{17};
constexpr VRegister v18{18};
constexpr VRegister v19{19};
constexpr VRegister v20{20};
constexpr VRegister v21{21};
constexpr VRegister v22{22};
constexpr VRegister v23{23};
constexpr VRegister v24{24};
constexpr VRegister v25{25};
constexpr VRegister v26{26};
constexpr VRegister v27{27};
constexpr VRegister v28{28};
constexpr VRegister v29{29};
constexpr VRegister v30{30};
constexpr VRegister v31{31};
struct VRegisterList {
// NOLINTNEXTLINE(google-explicit-constructor)
VRegisterList(VRegister vt1)
: vt1(vt1), length(1) {}
VRegisterList(VRegister vt1, VRegister vt2)
: vt1(vt1), vt2(vt2), length(2) {}
VRegisterList(VRegister vt1, VRegister vt2, VRegister vt3)
: vt1(vt1), vt2(vt2), vt3(vt3), length(3) {}
VRegisterList(VRegister vt1, VRegister vt2, VRegister vt3, VRegister vt4)
: vt1(vt1), vt2(vt2), vt3(vt3), vt4(vt4), length(4) {}
VRegister vt1;
VRegister vt2;
VRegister vt3;
VRegister vt4;
uint8_t length;
};
struct ScalarVRegisterList {
explicit ScalarVRegisterList(ScalarVRegister vt1)
: vt1(vt1), length(1) {}
ScalarVRegister vt1;
uint8_t length;
};
struct HRegister {
uint8_t code;
};
constexpr HRegister h0{0};
constexpr HRegister h1{1};
constexpr HRegister h2{2};
constexpr HRegister h3{3};
constexpr HRegister h4{4};
constexpr HRegister h5{5};
constexpr HRegister h6{6};
constexpr HRegister h7{7};
constexpr HRegister h8{8};
constexpr HRegister h9{9};
constexpr HRegister h10{10};
constexpr HRegister h11{11};
constexpr HRegister h12{12};
constexpr HRegister h13{13};
constexpr HRegister h14{14};
constexpr HRegister h15{15};
constexpr HRegister h16{16};
constexpr HRegister h17{17};
constexpr HRegister h18{18};
constexpr HRegister h19{19};
constexpr HRegister h20{20};
constexpr HRegister h21{21};
constexpr HRegister h22{22};
constexpr HRegister h23{23};
constexpr HRegister h24{24};
constexpr HRegister h25{25};
constexpr HRegister h26{26};
constexpr HRegister h27{27};
constexpr HRegister h28{28};
constexpr HRegister h29{29};
constexpr HRegister h30{30};
constexpr HRegister h31{31};
struct SRegister {
uint8_t code;
};
constexpr SRegister s0{0};
constexpr SRegister s1{1};
constexpr SRegister s2{2};
constexpr SRegister s3{3};
constexpr SRegister s4{4};
constexpr SRegister s5{5};
constexpr SRegister s6{6};
constexpr SRegister s7{7};
constexpr SRegister s8{8};
constexpr SRegister s9{9};
constexpr SRegister s10{10};
constexpr SRegister s11{11};
constexpr SRegister s12{12};
constexpr SRegister s13{13};
constexpr SRegister s14{14};
constexpr SRegister s15{15};
constexpr SRegister s16{16};
constexpr SRegister s17{17};
constexpr SRegister s18{18};
constexpr SRegister s19{19};
constexpr SRegister s20{20};
constexpr SRegister s21{21};
constexpr SRegister s22{22};
constexpr SRegister s23{23};
constexpr SRegister s24{24};
constexpr SRegister s25{25};
constexpr SRegister s26{26};
constexpr SRegister s27{27};
constexpr SRegister s28{28};
constexpr SRegister s29{29};
constexpr SRegister s30{30};
constexpr SRegister s31{31};
struct DRegister {
uint8_t code;
};
constexpr DRegister d0{0};
constexpr DRegister d1{1};
constexpr DRegister d2{2};
constexpr DRegister d3{3};
constexpr DRegister d4{4};
constexpr DRegister d5{5};
constexpr DRegister d6{6};
constexpr DRegister d7{7};
constexpr DRegister d8{8};
constexpr DRegister d9{9};
constexpr DRegister d10{10};
constexpr DRegister d11{11};
constexpr DRegister d12{12};
constexpr DRegister d13{13};
constexpr DRegister d14{14};
constexpr DRegister d15{15};
constexpr DRegister d16{16};
constexpr DRegister d17{17};
constexpr DRegister d18{18};
constexpr DRegister d19{19};
constexpr DRegister d20{20};
constexpr DRegister d21{21};
constexpr DRegister d22{22};
constexpr DRegister d23{23};
constexpr DRegister d24{24};
constexpr DRegister d25{25};
constexpr DRegister d26{26};
constexpr DRegister d27{27};
constexpr DRegister d28{28};
constexpr DRegister d29{29};
constexpr DRegister d30{30};
constexpr DRegister d31{31};
struct QRegister {
uint8_t code;
};
constexpr QRegister q0{0};
constexpr QRegister q1{1};
constexpr QRegister q2{2};
constexpr QRegister q3{3};
constexpr QRegister q4{4};
constexpr QRegister q5{5};
constexpr QRegister q6{6};
constexpr QRegister q7{7};
constexpr QRegister q8{8};
constexpr QRegister q9{9};
constexpr QRegister q10{10};
constexpr QRegister q11{11};
constexpr QRegister q12{12};
constexpr QRegister q13{13};
constexpr QRegister q14{14};
constexpr QRegister q15{15};
constexpr QRegister q16{16};
constexpr QRegister q17{17};
constexpr QRegister q18{18};
constexpr QRegister q19{19};
constexpr QRegister q20{20};
constexpr QRegister q21{21};
constexpr QRegister q22{22};
constexpr QRegister q23{23};
constexpr QRegister q24{24};
constexpr QRegister q25{25};
constexpr QRegister q26{26};
constexpr QRegister q27{27};
constexpr QRegister q28{28};
constexpr QRegister q29{29};
constexpr QRegister q30{30};
constexpr QRegister q31{31};
// C1.3.3 Load/Store addressing modes
enum class AddressingMode {
kOffset, // Base plus offset: [base{, #imm}] ; [base, Xm{, LSL #imm}].
kPostIndex, // Post-index: [base], #imm ; [base], Xm.
kPreIndex, // Pre-index: [base, #imm]!
};
struct MemOperand {
// NOLINTNEXTLINE(google-explicit-constructor)
MemOperand(XRegister xn): base(xn), mode(AddressingMode::kOffset), offset(0) {}
MemOperand(XRegister xn, int32_t offset): base(xn), mode(AddressingMode::kOffset), offset(offset) {}
MemOperand(XRegister xn, int32_t offset, AddressingMode mode): base(xn), mode(mode), offset(offset) {}
// Overloads the postfix increment operator to indicate pre-index addressing mode for loads/stores.
MemOperand operator++(int) {
mode = AddressingMode::kPreIndex;
return *this;
}
XRegister base;
AddressingMode mode;
int32_t offset;
};
static inline MemOperand operator,(XRegister r, int32_t offset) {
return MemOperand(r, offset);
}
// Helper struct for some syntax sugar to look like native assembly, see mem.
struct MemOperandHelper {
MemOperand operator[](MemOperand op) const { return op; }
MemOperand operator[](XRegister r) const { return MemOperand(r, 0); }
};
// Use "mem" (and its overload of array subscript operator) to get some syntax
// that looks closer to native assembly when accessing memory. For example:
// - ldp(x0, x1, mem[rn, offset]); // offset
// - ldp(x0, x1, mem[rn], offset); // post-indexed
constexpr MemOperandHelper mem;
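// - ldr(x0, mem[rn, 16]++); // pre-indexed, via the postfix ++ overload on MemOperand.
// (The comma inside mem[rn, 16] is the operator, overload above, which builds a MemOperand.)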
enum PrefetchOp {
kPLDL1KEEP = 0,
kPSTL1KEEP = 0x10,
};
enum Condition : uint32_t {
kEQ = 0x0,
kNE = 0x1,
kCS = 0x2,
kCC = 0x3,
kMI = 0x4,
kPL = 0x5,
kVS = 0x6,
kVC = 0x7,
kHI = 0x8,
kLS = 0x9,
kGE = 0xa,
kLT = 0xB,
kGT = 0xC,
kLE = 0xD,
kAL = 0xE,
kHS = kCS,
kLO = kCC,
};
enum class BranchType {
kConditional,
// For encoding purposes, TBZ and TBNZ are treated the same; they are called TBXZ here.
kTbxz,
kUnconditional,
};
// Instruction to use for alignment.
// kNop should be used for loops and branch targets; kHlt for the end of a function.
enum class AlignInstruction {
kHlt,
kNop,
};
class Assembler : public AssemblerBase {
public:
using AssemblerBase::AssemblerBase;
// Base instructions.
void add(XRegister xd, XRegister xn, uint16_t imm12);
void add(XRegister xd, XRegister xn, XRegister xm);
void adds(XRegister xd, XRegister xn, uint16_t imm12);
void ands(XRegister xd, XRegister xn, uint16_t imm12);
void b(Label& l);
void b_eq(Label& l) { return b(kEQ, l); }
void b_hi(Label& l) { return b(kHI, l); }
void b_hs(Label& l) { return b(kHS, l); }
void b_lo(Label& l) { return b(kLO, l); }
void b_ne(Label& l) { return b(kNE, l); }
void bl(int32_t offset);
void blr(XRegister xn);
void cmp(XRegister xn, uint16_t imm12);
void cmp(XRegister xn, XRegister xm);
void csel(XRegister xd, XRegister xn, XRegister xm, Condition c);
void hlt();
void ldp(XRegister xt1, XRegister xt2, MemOperand xn);
void ldp(XRegister xt1, XRegister xt2, MemOperand xn, int32_t imm);
void ldr(XRegister xt, MemOperand xn);
void ldr(WRegister xt, MemOperand xn, int32_t imm);
void ldr(XRegister xt, MemOperand xn, int32_t imm);
void mov(XRegister xd, uint16_t imm);
void mov(XRegister xd, XRegister xn);
void movk(XRegister xd, uint16_t imm, uint8_t shift);
void nop();
void prfm(PrefetchOp prfop, MemOperand xn);
void ret();
void stp(XRegister xt1, XRegister xt2, MemOperand xn);
void str(XRegister xt1, MemOperand xn);
void sub(XRegister xd, XRegister xn, XRegister xm);
void sub(XRegister xd, XRegister xn, uint16_t imm12);
void subs(XRegister xd, XRegister xn, uint16_t imm12);
void tbnz(XRegister xd, uint8_t bit, Label& l);
void tbz(XRegister xd, uint8_t bit, Label& l);
// Only immediates with lowest N bits set are supported.
void tst(XRegister xn, uint8_t imm);
// SIMD instructions
void dup(DRegister vd, VRegisterLane vn);
void dup(SRegister vd, VRegisterLane vn);
void dup(VRegister vd, VRegisterLane vn);
void fabs(VRegister vd, VRegister vn);
void fadd(VRegister vd, VRegister vn, VRegister vm);
void fmax(VRegister vd, VRegister vn, VRegister vm);
void fmin(VRegister vd, VRegister vn, VRegister vm);
void fmla(VRegister vd, VRegister vn, VRegisterLane vm);
void fmul(VRegister vd, VRegister vn, VRegister vm);
void fneg(VRegister vd, VRegister vn);
void ins(VRegisterLane vd, XRegister vn);
void ld1(VRegisterList vs, MemOperand xn, int32_t imm);
// LD1 (single structure).
// ld1({v1.d()}[0], mem[x0], 0) is invalid syntax, so the lane is a separate arg.
void ld1(ScalarVRegisterList vs, size_t lane, MemOperand xn, int32_t imm);
// Convenience overload for callers with a single V register in the list.
void ld1(ScalarVRegister v, size_t lane, MemOperand xn, int32_t imm);
void ld1r(VRegisterList xs, MemOperand xn);
void ld2r(VRegisterList xs, MemOperand xn);
void ld3r(VRegisterList xs, MemOperand xn);
void ldp(DRegister dt1, DRegister dt2, MemOperand xn);
void ldp(DRegister dt1, DRegister dt2, MemOperand xn, int32_t imm);
void ldp(QRegister qt1, QRegister qt2, MemOperand xn, int32_t imm);
void ldr(DRegister dt, MemOperand xn);
void ldr(SRegister dt, MemOperand xn);
void ldr(QRegister dt, MemOperand xn);
void ldr(DRegister dt, MemOperand xn, int32_t imm);
void ldr(HRegister dt, MemOperand xn, int32_t imm);
void ldr(QRegister qt, MemOperand xn, int32_t imm);
void ldr(SRegister st, MemOperand xn, int32_t imm);
void mov(VRegister vd, VRegister vn);
void movi(VRegister vd, uint8_t imm);
// MOV (to general).
void mov(XRegister xd, VRegisterLane vn);
void st1(VRegisterList vs, MemOperand xn, int32_t imm);
void st1(VRegisterList vs, MemOperand xn, XRegister xm);
void stp(DRegister dt1, DRegister dt2, MemOperand xn);
void stp(QRegister qt1, QRegister qt2, MemOperand xn);
void stp(QRegister qt1, QRegister qt2, MemOperand xn, int32_t imm);
void str(HRegister ht, MemOperand xn);
void str(SRegister st, MemOperand xn);
void str(DRegister dt, MemOperand xn, int32_t imm);
void str(QRegister qt, MemOperand xn, int32_t imm);
void str(SRegister st, MemOperand xn, int32_t imm);
// Aligns the buffer to n (must be a power of 2).
void align(uint8_t n, AlignInstruction instr);
void align(uint8_t n) { align(n, AlignInstruction::kNop); }
// Binds Label l to the current location in the code buffer.
void bind(Label& l);
private:
void b(Condition c, Label& l);
void branch_to_label(uint32_t opcode, BranchType bt, Label& l);
void ld1_st1_multiple_structures(VRegisterList vs, MemOperand xn, int32_t imm, bool load);
void ldr(uint32_t size, uint32_t opc, MemOperand xn, int32_t imm, uint8_t rt_code);
void str(uint32_t size, uint32_t opc, MemOperand xn, int32_t imm, uint8_t rt_code);
void tb_helper(uint32_t op, XRegister xd, uint8_t bit, Label& l);
};
class MacroAssembler : public Assembler {
using Assembler::Assembler;
public:
void f32_hardswish(VRegister sixth, VRegister three, VRegister six,
VRegister zero, const VRegister *accs, size_t num_accs,
const VRegister *tmps, size_t num_tmps);
void Mov(XRegister xd, uint64_t imm);
};
class TrampolineGenerator : public MacroAssembler {
using MacroAssembler::MacroAssembler;
public:
void generate(size_t args_on_stack);
private:
// Helper functions to check that registers match. We keep the expected value inside of x0 and return early once we
// have a mismatch. x0 then becomes the error code; if it is 0, there are no errors.
void CheckRegisterMatch(VRegisterLane actual, Label& exit);
void CheckRegisterMatch(XRegister actual, Label& exit);
};
} // namespace aarch64
} // namespace xnnpack
| 16,880
| 29.471119
| 117
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/aligned-allocator.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cstddef>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include <stdlib.h>
#if defined(__ANDROID__) || defined(_WIN32) || defined(__CYGWIN__)
#include <malloc.h>
#endif
template <typename T, size_t Alignment>
class AlignedAllocator;
template <size_t Alignment>
class AlignedAllocator<void, Alignment> {
public:
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
template <class U>
struct rebind {
typedef AlignedAllocator<U, Alignment> other;
};
};
template <typename T, size_t Alignment>
class AlignedAllocator {
public:
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
#if __cplusplus >= 201402L
typedef std::true_type propagate_on_container_move_assignment;
#endif
template <class U>
struct rebind {
typedef AlignedAllocator<U, Alignment> other;
};
public:
inline AlignedAllocator() noexcept {}
template <class U>
inline AlignedAllocator(
const AlignedAllocator<U, Alignment>& other) noexcept {}
inline size_type max_size() const noexcept {
return (std::numeric_limits<size_type>::max() - size_type(Alignment)) /
sizeof(T);
}
inline pointer address(reference x) const noexcept {
return std::addressof(x);
}
inline const_pointer address(const_reference x) const noexcept {
return std::addressof(x);
}
inline pointer allocate(
size_type n,
typename AlignedAllocator<void, Alignment>::const_pointer hint = 0) {
#if defined(_WIN32)
void* memory = nullptr;
memory = _aligned_malloc(n * sizeof(T), Alignment);
if (memory == 0) {
#if !defined(__GNUC__) && !defined(_MSC_VER) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
throw std::bad_alloc();
#endif
}
#elif defined(__ANDROID__) || defined(__CYGWIN__)
void* memory = memalign(Alignment, n * sizeof(T));
if (memory == 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
throw std::bad_alloc();
#endif
}
#else
void* memory = nullptr;
if (posix_memalign(&memory, Alignment, n * sizeof(T)) != 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
throw std::bad_alloc();
#endif
}
#endif
return static_cast<pointer>(memory);
}
inline void deallocate(pointer p, size_type n) noexcept {
#if defined(_WIN32)
_aligned_free(static_cast<void*>(p));
#else
free(static_cast<void*>(p));
#endif
}
template <class U, class... Args>
inline void construct(U* p, Args&&... args) {
::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
}
template <class U>
inline void destroy(U* p) {
p->~U();
}
};
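// A minimal usage sketch (not part of the original header): the allocator plugs into
// standard containers to obtain over-aligned storage, e.g. 64-byte alignment for AVX-512:
//
// #include <vector>
// std::vector<float, AlignedAllocator<float, 64>> buffer(1024);
// // buffer.data() is now guaranteed to be 64-byte aligned.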
| 2,998
| 22.992
| 92
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/allocator.h
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#if defined(_MSC_VER)
#include <malloc.h>
#elif !defined(__GNUC__)
#include <alloca.h>
#endif
#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
XNN_INTERNAL extern const struct xnn_allocator xnn_default_allocator;
inline static void* xnn_allocate_memory(size_t memory_size) {
return xnn_params.allocator.allocate(xnn_params.allocator.context, memory_size);
}
inline static void* xnn_allocate_zero_memory(size_t memory_size) {
void* memory_pointer = xnn_params.allocator.allocate(xnn_params.allocator.context, memory_size);
if (memory_pointer != NULL) {
memset(memory_pointer, 0, memory_size);
}
return memory_pointer;
}
inline static void* xnn_reallocate_memory(void* memory_pointer, size_t memory_size) {
return xnn_params.allocator.reallocate(xnn_params.allocator.context, memory_pointer, memory_size);
}
inline static void xnn_release_memory(void* memory_pointer) {
xnn_params.allocator.deallocate(xnn_params.allocator.context, memory_pointer);
}
inline static void* xnn_allocate_simd_memory(size_t memory_size) {
return xnn_params.allocator.aligned_allocate(xnn_params.allocator.context, XNN_ALLOCATION_ALIGNMENT, memory_size);
}
inline static void* xnn_allocate_zero_simd_memory(size_t memory_size) {
void* memory_pointer = xnn_params.allocator.aligned_allocate(
xnn_params.allocator.context, XNN_ALLOCATION_ALIGNMENT, memory_size);
if (memory_pointer != NULL) {
memset(memory_pointer, 0, memory_size);
}
return memory_pointer;
}
inline static void xnn_release_simd_memory(void* memory_pointer) {
xnn_params.allocator.aligned_deallocate(xnn_params.allocator.context, memory_pointer);
}
#if defined(__GNUC__) && defined(__BIGGEST_ALIGNMENT__) && (__BIGGEST_ALIGNMENT__ >= XNN_ALLOCATION_ALIGNMENT)
#define XNN_SIMD_ALLOCA(size) __builtin_alloca((size))
#elif (defined(__clang_major__) && (__clang_major__ >= 4)) || \
(defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7) && !defined(__INTEL_COMPILER))
// Builtin expects alignment in bits.
#define XNN_SIMD_ALLOCA(size) __builtin_alloca_with_align((size), XNN_ALLOCATION_ALIGNMENT * CHAR_BIT)
#elif defined(__GNUC__)
#define XNN_SIMD_ALLOCA(size) \
((void*) ((((uintptr_t) __builtin_alloca((size) + XNN_ALLOCATION_ALIGNMENT)) | (XNN_ALLOCATION_ALIGNMENT - 1)) + 1))
#elif defined(_MSC_VER)
#define XNN_SIMD_ALLOCA(size) \
((void*) ((((uintptr_t) _alloca((size) + XNN_ALLOCATION_ALIGNMENT)) | (XNN_ALLOCATION_ALIGNMENT - 1)) + 1))
#else
#define XNN_SIMD_ALLOCA(size) \
((void*) ((((uintptr_t) alloca((size) + XNN_ALLOCATION_ALIGNMENT)) | (XNN_ALLOCATION_ALIGNMENT - 1)) + 1))
#endif
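// The fallback alloca-based macros above over-allocate by XNN_ALLOCATION_ALIGNMENT bytes and
// round the pointer up with (ptr | (ALIGNMENT - 1)) + 1, which yields the smallest multiple
// of ALIGNMENT strictly greater than ptr. A worked example with ALIGNMENT = 64 (0x40):
//   ptr = 0x1005 -> (0x1005 | 0x3F) + 1 = 0x103F + 1 = 0x1040 (64-byte aligned)
//   ptr = 0x1040 -> (0x1040 | 0x3F) + 1 = 0x107F + 1 = 0x1080 (an aligned input still
//   advances, which is why the extra XNN_ALLOCATION_ALIGNMENT bytes are always allocated)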
#ifdef __cplusplus
} // extern "C"
#endif
| 3,012
| 33.632184
| 120
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/argmaxpool.h
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const float** input, \
size_t input_offset, \
float* output, \
uint32_t* index, \
size_t input_increment, \
size_t output_increment);
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_4x__neon_c4)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_4x__scalar_c1)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_4x__sse2_c4)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_4x__wasmsimd_c4)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9x__neon_c4)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9x__scalar_c1)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9x__sse2_c4)
DECLARE_F32_ARGMAXPOOL_UNIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9x__wasmsimd_c4)
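// As a sketch of what the macro above produces, the first declaration expands to:
//   XNN_INTERNAL void xnn_f32_argmaxpool_ukernel_4x__neon_c4(
//       size_t output_pixels, size_t kernel_elements, size_t channels,
//       const float** input, size_t input_offset, float* output, uint32_t* index,
//       size_t input_increment, size_t output_increment);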
#define DECLARE_F32_ARGMAXPOOL_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const float** input, \
size_t input_offset, \
float* accumulation_buffer, \
uint32_t* index_buffer, \
float* output, \
uint32_t* index, \
size_t input_increment, \
size_t output_increment);
DECLARE_F32_ARGMAXPOOL_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9p8x__neon_c4)
DECLARE_F32_ARGMAXPOOL_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1)
DECLARE_F32_ARGMAXPOOL_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4)
DECLARE_F32_ARGMAXPOOL_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_argmaxpool_ukernel_9p8x__wasmsimd_c4)
#ifdef __cplusplus
} // extern "C"
#endif
| 2,992
| 45.765625
| 95
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/array-helpers.h
|
#pragma once
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
namespace xnnpack {
namespace internal {
template <typename T, size_t N, typename F, size_t... Indx>
void ArrayApplyImpl(std::array<T, N>&& args, F&& f,
std::integer_sequence<size_t, Indx...> seq) {
f(std::move(args[Indx])...);
}
template <typename T, size_t N, typename F,
typename Indx = std::make_index_sequence<N>>
void ArrayApply(std::array<T, N>&& args, F&& f) {
return ArrayApplyImpl(std::move(args), f, Indx{});
}
template <size_t... Is, typename V>
constexpr std::array<V, sizeof...(Is)> MakeArrayImpl(
V value, std::integer_sequence<size_t, Is...>) {
return {((void)Is, value)...};
}
template <size_t N, typename V>
constexpr std::array<V, N> MakeArray(V value) {
return MakeArrayImpl(value, std::make_index_sequence<N>{});
}
template <typename T>
static constexpr T kDefault{};
template <typename T, size_t max_size>
class ArrayPrefix {
public:
constexpr ArrayPrefix(size_t size, T t)
: size_(size), array_(MakeArray<max_size>(t)) {
assert(size_ <= max_size);
}
explicit constexpr ArrayPrefix(size_t size) : size_(size) {
assert(size_ <= max_size);
}
template <typename Array,
typename = std::enable_if_t<!std::is_integral_v<Array>>>
explicit constexpr ArrayPrefix(Array&& array) : ArrayPrefix({}) {
for (const auto& v : array) {
push_back(v);
}
}
constexpr ArrayPrefix(std::initializer_list<T> init)
: ArrayPrefix(init.size(), kDefault<T>) {
assert(size_ <= max_size);
std::copy(init.begin(), init.end(), begin());
}
auto begin() { return array_.begin(); }
auto begin() const { return array_.cbegin(); }
auto end() {
auto result = array_.begin();
std::advance(result, size_);
return result;
}
auto end() const {
auto result = array_.cbegin();
std::advance(result, size_);
return result;
}
auto& operator[](size_t index) {
assert(index < size_);
return array_[index];
}
const auto& operator[](size_t index) const {
assert(index < size_);
return array_[index];
}
void push_back(const T& t) {
assert(size_ + 1 <= max_size);
array_[size_++] = t;
}
size_t size() const { return size_; }
private:
size_t size_;
std::array<T, max_size> array_;
};
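// A minimal usage sketch (illustrative only): ArrayPrefix behaves like a fixed-capacity
// vector whose storage is an inline std::array.
//
// ArrayPrefix<int, 8> prefix{1, 2, 3}; // size() == 3, capacity 8
// prefix.push_back(4);                 // size() == 4
// for (int v : prefix) { /* iterates over 1, 2, 3, 4 */ }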
} // namespace internal
} // namespace xnnpack
| 2,396
| 24.5
| 68
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/assembler.h
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <xnnpack/memory.h>
#include <array>
#include <cstdint>
typedef uint8_t byte;
namespace xnnpack {
constexpr size_t kInstructionSizeInBytes = 4;
constexpr size_t kInstructionSizeInBytesLog2 = 2;
enum class Error {
kNoError,
kOutOfMemory,
kInvalidOperand,
kLabelAlreadyBound,
kLabelOffsetOutOfBounds,
kLabelHasTooManyUsers,
kInvalidLaneIndex,
kInvalidRegisterListLength,
kFinalizeCodeMemoryFail,
kUnimplemented,
};
// The biggest user of labels is the callee-saved register check in test mode.
constexpr size_t max_label_users = 16;
// Label is a target of a branch. You call Assembler::bind to bind a label to an
// actual location in the instruction stream.
//
// ```
// Label l;
// b(kAL, l);  // Branching to an unbound label is fine; it will be patched later.
// a.bind(l);  // Binds the label to this location in the instruction stream.
// b(kAL, l);  // Branch to an already-bound label.
// ```
struct Label {
// Location of label within Assembler buffer.
byte* offset = nullptr;
// A label can only be bound once; binding it again leads to an error.
bool bound = (offset != nullptr);
// All users of this label, recorded by their offset in the Assembler buffer.
std::array<byte*, max_label_users> users{{0}};
size_t num_users = 0;
// Records a user (e.g. branch instruction) of this label.
// Returns true if success, false if number of users exceeds maximum.
bool add_use(byte* offset) {
if (num_users >= max_label_users) {
return false;
}
users[num_users++] = offset;
return true;
}
};
class AssemblerBase {
public:
// Takes an xnn_code_buffer with a pointer to allocated memory. If the buffer
// already contains content (size != 0), new code is appended after it (up to capacity).
explicit AssemblerBase(xnn_code_buffer* buf);
// Writes the value into the code buffer and advances cursor_.
void emit32(uint32_t value);
void emit8(byte value);
// Finishes assembly of the code; this should be the last function called on an
// instance of Assembler. Returns a pointer to the start of the code region.
void* finalize();
// Reset the assembler state (no memory is freed).
void reset();
// Get a pointer to the start of code buffer.
const byte* start() const { return buffer_; }
const byte* offset() const { return cursor_; }
template<typename T>
T offset() const { return reinterpret_cast<T>(cursor_); }
// Returns the number of bytes of code actually in the buffer.
size_t code_size_in_bytes() const { return (cursor_ - buffer_); }
Error error() const { return error_; }
protected:
// Pointer into code buffer to start writing code.
byte* buffer_;
// Pointer to current position in code buffer.
byte* cursor_;
// Pointer to out-of-bounds of code buffer.
byte* top_;
// Errors encountered while assembling code.
Error error_ = Error::kNoError;
// Holds an xnn_code_buffer; code is written to its code pointer, and unused
// pages are unmapped on finalization.
xnn_code_buffer* xnn_buffer = nullptr;
private:
template <typename Value>
void emit(Value value) {
if (error_ != Error::kNoError) {
return;
}
if (cursor_ + sizeof(value) > top_) {
error_ = Error::kOutOfMemory;
return;
}
memcpy(cursor_, &value, sizeof(value));
cursor_ += sizeof(value);
}
};
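// A minimal lifecycle sketch (assuming a code buffer already allocated elsewhere in
// XNNPACK, e.g. via xnn_allocate_code_memory from xnnpack/memory.h):
//
// ```
// AssemblerBase a(&code_buffer);
// a.emit32(0xD503201F);       // Emit one instruction (an AArch64 NOP here).
// void* code = a.finalize();  // Last call; returns the start of the code region.
// if (a.error() != Error::kNoError) { /* handle failure */ }
// ```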
} // namespace xnnpack
| 3,515
| 28.546218
| 82
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/avgpool.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_F16_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const void** input, \
size_t input_offset, \
const void* zero, \
void* buffer, \
void* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f16_scaleminmax_params* params);
DECLARE_F16_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_avgpool_minmax_ukernel_9p8x__f16c_c8)
DECLARE_F16_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8)
#define DECLARE_F16_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const void** input, \
size_t input_offset, \
const void* zero, \
void* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f16_scaleminmax_params* params);
DECLARE_F16_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_avgpool_minmax_ukernel_9x__f16c_c8)
DECLARE_F16_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_avgpool_minmax_ukernel_9x__neonfp16arith_c8)
#define DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const float** input, \
size_t input_offset, \
const float* zero, \
float* buffer, \
float* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f32_scaleminmax_params* params);
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4)
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1)
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4)
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__wasm_c1)
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4)
DECLARE_F32_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4)
#define DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const float** input, \
size_t input_offset, \
const float* zero, \
float* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f32_scaleminmax_params* params);
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__neon_c4)
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__scalar_c1)
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__sse_c4)
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__wasm_c1)
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_arm_c4)
DECLARE_F32_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_x86_c4)
#define DECLARE_QU8_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const uint8_t** input, \
size_t input_offset, \
const uint8_t* zero, \
int32_t* buffer, \
uint8_t* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_qu8_avgpool_minmax_params* params);
DECLARE_QU8_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__neon_c8)
DECLARE_QU8_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1)
DECLARE_QU8_AVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__sse2_c8)
#define DECLARE_QU8_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_elements, \
size_t channels, \
const uint8_t** input, \
size_t input_offset, \
const uint8_t* zero, \
uint8_t* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_qu8_avgpool_minmax_params* params);
DECLARE_QU8_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9x__neon_c8)
DECLARE_QU8_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1)
DECLARE_QU8_AVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_avgpool_minmax_fp32_ukernel_9x__sse2_c8)
#ifdef __cplusplus
} // extern "C"
#endif
| 8,026
| 56.335714
| 113
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/cache.h
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h> // For size_t.
#include <stdint.h> // For uint32_t.
#include <xnnpack.h> // For xnn_status.
#include <xnnpack/common.h> // For XNN_INLINE.
#include <xnnpack/memory.h> // For xnn_code_buffer.
#include <xnnpack/mutex.h> // For xnn_mutex.
#ifdef __cplusplus
extern "C" {
#endif
#define XNN_CACHE_NOT_FOUND SIZE_MAX  // Return value when an entry is not found in the cache.
// A cache for arbitrary bytes.
// The implementation is similar to a hash table with open addressing and linear
// probing, but restricted to our use cases.
// Similar to buckets in a hash table implementation, this is an entry in the
// cache. It stores "metadata" about the generated code (size and offset). The
// actual bytes are in the cache's buffer.
struct xnn_cache_bucket {
// A hash for quick comparison.
uint32_t hash;
// Size of bytes.
size_t size;
// Offset of bytes, relative to cache's buffer.
size_t offset;
};
enum xnn_cache_type {
xnn_cache_type_invalid = 0,
xnn_cache_type_code,
xnn_cache_type_weights,
};
struct xnn_cache {
enum xnn_cache_type type;
// A growing buffer that is used to keep all generated code or repacked weights.
union {
struct xnn_code_buffer code;
struct xnn_weights_buffer weights;
};
// Entries in the cache.
struct xnn_cache_bucket* buckets;
// Capacity of the cache. When the load factor (num_entries/num_buckets) grows
// beyond a limit, the cache is expanded.
size_t num_buckets;
size_t num_entries;
size_t hits;
size_t misses;
};
// A cache for JIT generated microkernel code.
struct xnn_code_cache {
struct xnn_cache cache;
};
enum xnn_status xnn_init_code_cache(struct xnn_code_cache* cache);
enum xnn_status xnn_release_code_cache(struct xnn_code_cache* cache);
// Looks up `ptr` in the cache and returns the offset into the cache's buffer if found.
// `ptr` should already point into cache->buffer.
// If it already exists within the cache, the buffer will be rewound, so we can
// reuse the same section of the buffer.
size_t xnn_get_or_insert_code_cache(struct xnn_code_cache* cache, void* ptr, size_t size);
XNN_INLINE static bool xnn_code_cache_valid(struct xnn_code_cache* code_cache) {
return code_cache != NULL && code_cache->cache.type == xnn_cache_type_code;
}
// The state of weights cache finalization.
enum xnn_cache_state {
// Not finalized.
xnn_cache_state_not_finalized,
// The underlying memory is trimmed to be as compact as possible.
xnn_cache_state_hard_finalized,
// The underlying memory has some extra space at the end.
xnn_cache_state_soft_finalized,
};
// A cache for repacked weights.
struct xnn_weights_cache {
struct xnn_cache cache;
// Protects updates of `cache`; it has the same lifetime as `cache`, and so should be initialized/destroyed together
// with the `cache`.
struct xnn_mutex mutex;
// Maximum size of packed weights that have been inserted into the cache.
size_t max_weights_size;
enum xnn_cache_state finalization_state;
};
enum xnn_status xnn_init_weights_cache(struct xnn_weights_cache* cache);
enum xnn_status xnn_init_weights_cache_with_size(struct xnn_weights_cache* cache, size_t size);
// Finalizes the weights cache, so that we cannot insert any more entries into the cache.
enum xnn_status xnn_finalize_weights_cache(
struct xnn_weights_cache* cache,
enum xnn_weights_cache_finalization_kind finalization_kind);
enum xnn_status xnn_release_weights_cache(struct xnn_weights_cache* cache);
// Ensures that the cache has enough space for `n` bytes and locks the mutex to protect future updates. The mutex must
// be unlocked using xnn_get_or_insert_weights_cache.
void* xnn_reserve_space_in_weights_cache(struct xnn_weights_cache* cache, size_t n);
// Looks up the packed weights at `ptr` in the cache. If they are found, they are reused; otherwise they are added to
// the cache. The mutex must already be locked before calling this function; it is unlocked before the function returns.
size_t xnn_get_or_insert_weights_cache(struct xnn_weights_cache* cache, void* ptr, size_t size);
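// A minimal sketch of the reserve/insert protocol described above (error handling omitted;
// `packed_weights` and `n` are illustrative):
//
//   void* ptr = xnn_reserve_space_in_weights_cache(cache, n);  // locks the mutex
//   if (ptr != NULL) {
//     memcpy(ptr, packed_weights, n);  // write packed weights into the reserved space
//     size_t offset = xnn_get_or_insert_weights_cache(cache, ptr, n);  // unlocks the mutex
//   }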
bool xnn_weights_cache_is_finalized(struct xnn_weights_cache* cache);
#ifdef __cplusplus
} // extern "C"
#endif
| 4,355
| 36.230769
| 119
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/common.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
// Define architecture identification macros
#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
#define XNN_ARCH_X86 1
#else
#define XNN_ARCH_X86 0
#endif
#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) && !defined(_M_ARM64EC)
#define XNN_ARCH_X86_64 1
#else
#define XNN_ARCH_X86_64 0
#endif
#if defined(__arm__) || defined(_M_ARM)
#define XNN_ARCH_ARM 1
#else
#define XNN_ARCH_ARM 0
#endif
#if defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define XNN_ARCH_ARM64 1
#else
#define XNN_ARCH_ARM64 0
#endif
#if defined(__PPC64__) || defined(__ppc64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
#define XNN_ARCH_PPC64 1
#else
#define XNN_ARCH_PPC64 0
#endif
#if defined(__riscv) || defined(__riscv__)
#define XNN_ARCH_RISCV 1
#else
#define XNN_ARCH_RISCV 0
#endif
#if defined(__hexagon__)
#define XNN_ARCH_HEXAGON 1
#else
#define XNN_ARCH_HEXAGON 0
#endif
#if defined(__wasm__)
#if defined(__wasm_relaxed_simd__)
#define XNN_ARCH_WASM 0
#define XNN_ARCH_WASMSIMD 0
#define XNN_ARCH_WASMRELAXEDSIMD 1
#elif defined(__wasm_simd128__)
#define XNN_ARCH_WASM 0
#define XNN_ARCH_WASMSIMD 1
#define XNN_ARCH_WASMRELAXEDSIMD 0
#else
#define XNN_ARCH_WASM 1
#define XNN_ARCH_WASMSIMD 0
#define XNN_ARCH_WASMRELAXEDSIMD 0
#endif
#else
#define XNN_ARCH_WASM 0
#define XNN_ARCH_WASMSIMD 0
#define XNN_ARCH_WASMRELAXEDSIMD 0
#endif
// Define platform identification macros
#if defined(__ANDROID__)
#define XNN_PLATFORM_ANDROID 1
#else
#define XNN_PLATFORM_ANDROID 0
#endif
#if defined(__linux__)
#define XNN_PLATFORM_LINUX 1
#else
#define XNN_PLATFORM_LINUX 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE
// iOS on iPhone / iPod touch, iPadOS, watchOS, or tvOS
#define XNN_PLATFORM_IOS 1
#else
#define XNN_PLATFORM_IOS 0
#endif
#if defined(__APPLE__) && TARGET_OS_MAC
#define XNN_PLATFORM_MAC 1
#else
#define XNN_PLATFORM_MAC 0
#endif
#if XNN_PLATFORM_ANDROID || XNN_PLATFORM_IOS
#define XNN_PLATFORM_MOBILE 1
#else
#define XNN_PLATFORM_MOBILE 0
#endif
#if defined(__EMSCRIPTEN__) || defined(__wasm__)
#define XNN_PLATFORM_WEB 1
#else
#define XNN_PLATFORM_WEB 0
#endif
#if defined(_WIN32)
#define XNN_PLATFORM_WINDOWS 1
#else
#define XNN_PLATFORM_WINDOWS 0
#endif
#if defined(__Fuchsia__)
#define XNN_PLATFORM_FUCHSIA 1
#else
#define XNN_PLATFORM_FUCHSIA 0
#endif
#if defined(__hexagon__) && !defined(__linux__)
#define XNN_PLATFORM_QURT 1
#else
#define XNN_PLATFORM_QURT 0
#endif
#ifndef XNN_PLATFORM_JIT
#if (XNN_ARCH_ARM || XNN_ARCH_ARM64) && !XNN_PLATFORM_IOS && !XNN_PLATFORM_FUCHSIA || XNN_PLATFORM_WEB
#define XNN_PLATFORM_JIT 1
#else
#define XNN_PLATFORM_JIT 0
#endif
#endif // XNN_PLATFORM_JIT
// Define compile identification macros
#if defined(__clang__)
#define XNN_COMPILER_CLANG 1
#elif defined(__INTEL_COMPILER)
#define XNN_COMPILER_ICC 1
#elif defined(_MSC_VER)
#define XNN_COMPILER_MSVC 1
#elif defined(__GNUC__)
#define XNN_COMPILER_GCC 1
#endif
#ifndef XNN_COMPILER_CLANG
#define XNN_COMPILER_CLANG 0
#endif
#ifndef XNN_COMPILER_GCC
#define XNN_COMPILER_GCC 0
#endif
#ifndef XNN_COMPILER_MSVC
#define XNN_COMPILER_MSVC 0
#endif
#ifndef XNN_COMPILER_ICC
#define XNN_COMPILER_ICC 0
#endif
#ifndef XNN_TEST_MODE
#define XNN_TEST_MODE 0
#endif
#ifndef XNN_MAX_UARCH_TYPES
#if (XNN_ARCH_ARM || XNN_ARCH_ARM64) && !XNN_PLATFORM_IOS
#define XNN_MAX_UARCH_TYPES 3
#else
#define XNN_MAX_UARCH_TYPES 1
#endif
#endif
#define XNN_UARCH_DEFAULT 0
#if defined(__has_builtin)
#define XNN_COMPILER_HAS_BUILTIN(builtin) __has_builtin(builtin)
#else
#define XNN_COMPILER_HAS_BUILTIN(builtin) 0
#endif
#if defined(__has_feature)
#define XNN_COMPILER_HAS_FEATURE(builtin) __has_feature(builtin)
#else
#define XNN_COMPILER_HAS_FEATURE(builtin) 0
#endif
#if defined(__GNUC__)
#if defined(__clang__) || (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 5)
#define XNN_UNREACHABLE do { __builtin_unreachable(); } while (0)
#else
#define XNN_UNREACHABLE do { __builtin_trap(); } while (0)
#endif
#elif defined(_MSC_VER)
#define XNN_UNREACHABLE __assume(0)
#else
#define XNN_UNREACHABLE do { } while (0)
#endif
#if defined(__GNUC__)
#define XNN_ALIGN(alignment) __attribute__((__aligned__(alignment)))
#elif defined(_MSC_VER)
#define XNN_ALIGN(alignment) __declspec(align(alignment))
#else
#error "Platform-specific implementation of XNN_ALIGN required"
#endif
#if defined(__GNUC__)
#define XNN_UNALIGNED __attribute__((__aligned__(1)))
#elif defined(_MSC_VER)
#if defined(_M_IX86)
#define XNN_UNALIGNED
#else
#define XNN_UNALIGNED __unaligned
#endif
#else
#error "Platform-specific implementation of XNN_UNALIGNED required"
#endif
#define XNN_COUNT_OF(array) (sizeof(array) / sizeof(0[array]))
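// For example, XNN_COUNT_OF yields the element count of a true array:
//   static const float kTable[5] = {0.0f};
//   // XNN_COUNT_OF(kTable) == 5. Note that 0[array] is equivalent to array[0] for
//   // built-in arrays, so the macro fails to compile for class types that merely
//   // overload operator[], catching accidental use on non-array containers.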
#if defined(__cplusplus) || XNN_COMPILER_MSVC || XNN_COMPILER_CLANG
// Using static for array indices in a function parameter declaration is a C99 feature, not supported in C++.
// MSVC does not support this feature, even in C mode.
// Clang generates suboptimal code, see https://github.com/llvm/llvm-project/issues/59120
#define XNN_MIN_ELEMENTS(count) count
#else
#define XNN_MIN_ELEMENTS(count) static count
#endif
#if defined(__GNUC__)
#define XNN_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#define XNN_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
#else
#define XNN_LIKELY(condition) (!!(condition))
#define XNN_UNLIKELY(condition) (!!(condition))
#endif
#if XNN_COMPILER_HAS_BUILTIN(__builtin_unpredictable)
#define XNN_UNPREDICTABLE(condition) (__builtin_unpredictable(!!(condition)))
#elif defined(__GNUC__) && (__GNUC__ >= 9) && !defined(__INTEL_COMPILER)
#define XNN_UNPREDICTABLE(condition) (__builtin_expect_with_probability(!!(condition), 0, 0.5))
#else
#define XNN_UNPREDICTABLE(condition) (!!(condition))
#endif
#if XNN_COMPILER_HAS_FEATURE(thread_sanitizer)
#define XNN_DISABLE_TSAN __attribute__((__no_sanitize__("thread")))
#else
#define XNN_DISABLE_TSAN
#endif
#if XNN_COMPILER_HAS_FEATURE(memory_sanitizer)
#define XNN_DISABLE_MSAN __attribute__((__no_sanitize__("memory")))
#else
#define XNN_DISABLE_MSAN
#endif
#if XNN_COMPILER_HAS_FEATURE(hwaddress_sanitizer)
#define XNN_DISABLE_HWASAN __attribute__((__no_sanitize__("hwaddress")))
#else
#define XNN_DISABLE_HWASAN
#endif
#define XNN_OOB_READS XNN_DISABLE_TSAN XNN_DISABLE_MSAN XNN_DISABLE_HWASAN
#if defined(__GNUC__)
#define XNN_INTRINSIC inline __attribute__((__always_inline__, __artificial__))
#elif defined(_MSC_VER)
#define XNN_INTRINSIC __forceinline
#else
#define XNN_INTRINSIC inline
#endif
#if defined(__GNUC__)
#define XNN_INLINE inline __attribute__((__always_inline__))
#elif defined(_MSC_VER)
#define XNN_INLINE __forceinline
#else
#define XNN_INLINE inline
#endif
#ifndef XNN_INTERNAL
#if defined(__ELF__)
#define XNN_INTERNAL __attribute__((__visibility__("internal")))
#elif defined(__MACH__)
#define XNN_INTERNAL __attribute__((__visibility__("hidden")))
#else
#define XNN_INTERNAL
#endif
#endif
#ifndef XNN_PRIVATE
#if defined(__ELF__)
#define XNN_PRIVATE __attribute__((__visibility__("hidden")))
#elif defined(__MACH__)
#define XNN_PRIVATE __attribute__((__visibility__("hidden")))
#else
#define XNN_PRIVATE
#endif
#endif
#if defined(__clang__)
#define XNN_PRAGMA_CLANG(pragma) _Pragma(pragma)
#else
#define XNN_PRAGMA_CLANG(pragma)
#endif
#if XNN_ARCH_WASM
#define XNN_ALLOCATION_ALIGNMENT 4
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
#if XNN_PLATFORM_MOBILE
#define XNN_ALLOCATION_ALIGNMENT 32
#else
#define XNN_ALLOCATION_ALIGNMENT 64
#endif
#else
#define XNN_ALLOCATION_ALIGNMENT 16
#endif
// Number of extra elements to allocate for DWCONV accumulators/buffers.
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
// For AVX512.
#define XNN_MAX_SIMD_SIZE 64
#elif XNN_ARCH_RISCV || XNN_ARCH_WASM
// RISC-V: scalable vectors, assume masked loads and stores.
// Wasm: no SIMD.
#define XNN_MAX_SIMD_SIZE 0
#elif XNN_ARCH_HEXAGON
#define XNN_MAX_SIMD_SIZE 128
#else
// XNN_ARCH_ARM, XNN_ARCH_ARM64, XNN_ARCH_WASMSIMD, XNN_ARCH_WASMRELAXEDSIMD.
#define XNN_MAX_SIMD_SIZE 16
#endif
// Use constant here to avoid dependency on xnnpack.h
#if XNN_MAX_SIMD_SIZE >= 16
#define XNN_MULTIPASS_EXTRA_BYTES XNN_MAX_SIMD_SIZE
#else
#define XNN_MULTIPASS_EXTRA_BYTES 16
#endif
#define XNN_LOG2_SIZEOF_INT8_T 0 // log2(sizeof(int8_t))
#define XNN_LOG2_SIZEOF_UINT8_T 0 // log2(sizeof(uint8_t))
#define XNN_LOG2_SIZEOF_INT16_T 1 // log2(sizeof(int16_t))
#define XNN_LOG2_SIZEOF_UINT16_T 1 // log2(sizeof(uint16_t))
#define XNN_LOG2_SIZEOF_HALF 1 // log2(sizeof(half))
#define XNN_LOG2_SIZEOF_FLOAT 2 // log2(sizeof(float))
#define XNN_LOG2_SIZEOF_INT32_T 2 // log2(sizeof(int32_t))
#define XNN_LOG2_SIZEOF_UINT32_T 2 // log2(sizeof(uint32_t))
| 9,330
| 24.634615
| 104
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/conv.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t input_height, \
size_t input_width, \
size_t output_y_start, \
size_t output_y_end, \
const float* input, \
const float* zero, \
const float* weights, \
float* output, \
size_t input_padding_top, \
size_t output_channels, \
size_t output_height_stride, \
size_t output_width_stride, \
const union xnn_f32_minmax_params* params);
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__aarch64_neonfma_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__aarch64_neonfma_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__scalar_1x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__aarch64_neonfma_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__aarch64_neonfma_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__aarch64_neonfma_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__aarch64_neonfma_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__neon_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__neon_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__scalar_1x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x8__aarch64_neonfma_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x8__aarch64_neonfma_2x2)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x8__neon_2x1)
DECLARE_F32_CONV_HWC_UKERNEL_FUNCTION(xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x8__neon_2x2)
#define DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t input_height, \
size_t input_width, \
size_t output_y_start, \
size_t output_y_end, \
const float* input, \
const float* zero, \
const float* weights, \
float* output, \
size_t input_padding_top, \
size_t output_channels, \
size_t output_height_stride, \
size_t output_channel_stride, \
const union xnn_f32_minmax_params* params);
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__aarch64_neonfma_2x2)
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2)
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1)
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_1x1)
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_2x2)
DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__wasmsimd_2x2)
#define DECLARE_F16_CONV_HWC2CHW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t input_height, \
size_t input_width, \
size_t output_y_start, \
size_t output_y_end, \
const void* input, \
const void* zero, \
const void* weights, \
void* output, \
size_t input_padding_top, \
size_t output_channels, \
size_t output_height_stride, \
size_t output_channel_stride, \
const union xnn_f16_minmax_params* params);
DECLARE_F16_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f16_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfp16arith_2x2)
#ifdef __cplusplus
} // extern "C"
#endif
| 5,501
| 52.417476
| 104
|
h
|
XNNPACK
|
XNNPACK-master/src/xnnpack/fft.h
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t batch, \
size_t samples, \
int16_t* data, \
const int16_t* twiddle, \
size_t stride);
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x1)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x2)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x4)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples1_ukernel__neon)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples1_ukernel__scalar)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples4_ukernel__neon)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_samples4_ukernel__scalar)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_ukernel__neon_x1)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_ukernel__neon_x4)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_ukernel__scalar_x1)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_ukernel__scalar_x2)
DECLARE_CS16_BFLY4_UKERNEL_FUNCTION(xnn_cs16_bfly4_ukernel__scalar_x4)
#define DECLARE_CS16_FFTR_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t samples, \
int16_t* data, \
const int16_t* twiddle);
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__asm_aarch32_neon_x1)
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__asm_aarch32_neon_x4)
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__neon_x4)
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__scalar_x1)
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__scalar_x2)
DECLARE_CS16_FFTR_UKERNEL_FUNCTION(xnn_cs16_fftr_ukernel__scalar_x4)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/fill.h =====
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_FILL_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t kernel_elements, \
size_t channels, \
void* output, \
size_t output_stride, \
const uint32_t fill_pattern);
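// Descriptive sketch (units inferred from the type-agnostic xx_ prefix):
// fills 'kernel_elements' rows, 'output_stride' bytes apart, with the 4-byte
// 'fill_pattern' replicated across 'channels' bytes per row.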
DECLARE_FILL_UKERNEL_FUNCTION(xnn_xx_fill_ukernel__neon_x64)
DECLARE_FILL_UKERNEL_FUNCTION(xnn_xx_fill_ukernel__scalar_x16)
DECLARE_FILL_UKERNEL_FUNCTION(xnn_xx_fill_ukernel__sse2_x64)
DECLARE_FILL_UKERNEL_FUNCTION(xnn_xx_fill_ukernel__wasmsimd_x64)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/filterbank.h =====
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
const uint32_t* input, \
const uint8_t* weight_widths, \
const uint16_t* weights, \
uint64_t* output);
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_arm_x1)
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x1)
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x2)
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__neon_x1)
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__neon_x2)
DECLARE_U32_FILTERBANK_ACCUMULATE_UKERNEL_FUNCTION(xnn_u32_filterbank_accumulate_ukernel__scalar_x1)
#define DECLARE_U32_FILTERBANK_SUBTRACT_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t batch_size, \
const uint32_t* input, \
uint32_t smoothing, \
uint32_t alternate_smoothing, \
uint32_t one_minus_smoothing, \
uint32_t alternate_one_minus_smoothing, \
uint32_t min_signal_remaining, \
uint32_t smoothing_bits, \
uint32_t spectral_subtraction_bits, \
uint32_t* noise_estimate, \
uint32_t* output);
DECLARE_U32_FILTERBANK_SUBTRACT_UKERNEL_FUNCTION(xnn_u32_filterbank_subtract_ukernel__scalar_x2)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/gavgpool.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const float* input, \
size_t input_stride, \
const float* zero, \
float* buffer, \
float* output, \
const union xnn_f32_scaleminmax_params* params);
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4)
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1)
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4)
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1)
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4)
DECLARE_F32_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4)
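// Descriptive note, inferred from the signatures and naming convention: the
// 7p7x multipass kernels above accumulate 7 input rows per pass into
// 'buffer' and can handle any row count, while the 7x unipass kernels below
// process at most 7 rows in a single pass and take no buffer argument.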
#define DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const float* input, \
size_t input_stride, \
const float* zero, \
float* output, \
const union xnn_f32_scaleminmax_params* params);
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__neon_c4)
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__scalar_c1)
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4)
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__wasm_c1)
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_arm_c4)
DECLARE_F32_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_x86_c4)
#define DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const void* input, \
size_t input_stride, \
const void* zero, \
void* buffer, \
void* output, \
const union xnn_f16_scaleminmax_params* params);
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c16)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c24)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c32)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24)
DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32)
#define DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const void* input, \
size_t input_stride, \
const void* zero, \
void* output, \
const union xnn_f16_scaleminmax_params* params);
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c16)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c24)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c32)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24)
DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32)
#define DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const int8_t* input, \
size_t input_stride, \
const int8_t* zero, \
int32_t* buffer, \
int8_t* output, \
const union xnn_qs8_avgpool_minmax_params* params);
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c4)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c4)
#define DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const int8_t* input, \
size_t input_stride, \
const int8_t* zero, \
int8_t* output, \
const union xnn_qs8_avgpool_minmax_params* params);
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c8)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c24)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c32)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c1)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2)
DECLARE_QS8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4)
#define DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const uint8_t* input, \
size_t input_stride, \
const uint8_t* zero, \
int32_t* buffer, \
uint8_t* output, \
const union xnn_qu8_avgpool_minmax_params* params);
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c4)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c4)
#define DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t rows, \
size_t channels, \
const uint8_t* input, \
size_t input_stride, \
const uint8_t* zero, \
uint8_t* output, \
const union xnn_qu8_avgpool_minmax_params* params);
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c8)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c24)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c32)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c1)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2)
DECLARE_QU8_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4)
#define DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t elements, \
size_t channels, \
const float* input, \
float* output, \
const union xnn_f32_gavgpool_params* params);
DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__neon_x4)
DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__scalar_x1)
DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__sse_x4)
DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4)
DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4)
#define DECLARE_F16_GAVGPOOL_CW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t elements, \
size_t channels, \
const void* input, \
void* output, \
const union xnn_f16_gavgpool_params* params);
DECLARE_F16_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/ibilinear.h =====
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
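// Descriptive note: "ibilinear" = indirect bilinear interpolation. 'input'
// is an indirection buffer holding 4 pointers per output pixel (its 2x2
// input neighborhood), and 'weights' supplies the per-pixel blend factors.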
#define DECLARE_F16_IBILINEAR_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const void** input, \
size_t input_offset, \
const void* weights, \
void* output, \
size_t output_increment);
DECLARE_F16_IBILINEAR_UKERNEL_FUNCTION(xnn_f16_ibilinear_ukernel__fma3_c8)
DECLARE_F16_IBILINEAR_UKERNEL_FUNCTION(xnn_f16_ibilinear_ukernel__fma3_c16)
DECLARE_F16_IBILINEAR_UKERNEL_FUNCTION(xnn_f16_ibilinear_ukernel__neonfp16arith_c8)
DECLARE_F16_IBILINEAR_UKERNEL_FUNCTION(xnn_f16_ibilinear_ukernel__neonfp16arith_c16)
#define DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const float** input, \
size_t input_offset, \
const float* weights, \
float* output, \
size_t output_increment);
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__scalar_c1)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__scalar_c2)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__scalar_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__neon_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__neon_c8)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__neonfma_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__neonfma_c8)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__sse_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__sse_c8)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__wasmsimd_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__wasmsimd_c8)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__wasmrelaxedsimd_c4)
DECLARE_F32_IBILINEAR_UKERNEL_FUNCTION(xnn_f32_ibilinear_ukernel__wasmrelaxedsimd_c8)
#define DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const int8_t** input, \
size_t input_offset, \
const int16_t* weights, \
int8_t* output, \
size_t output_increment);
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__scalar_c1)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__scalar_c2)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__scalar_c4)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__neon_c8)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__neon_c16)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__sse2_c8)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__sse2_c16)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__sse41_c8)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__sse41_c16)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__wasmsimd_dot16x2_c8)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__wasmsimd_dot16x2_c16)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__wasmsimd_mul32_c8)
DECLARE_S8_IBILINEAR_UKERNEL_FUNCTION(xnn_s8_ibilinear_ukernel__wasmsimd_mul32_c16)
#define DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const uint8_t** input, \
size_t input_offset, \
const int16_t* weights, \
uint8_t* output, \
size_t output_increment);
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__scalar_c1)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__scalar_c2)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__scalar_c4)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__neon_c8)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__neon_c16)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__sse2_c8)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__sse2_c16)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__sse41_c8)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__sse41_c16)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c8)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c16)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__wasmsimd_mul32_c8)
DECLARE_U8_IBILINEAR_UKERNEL_FUNCTION(xnn_u8_ibilinear_ukernel__wasmsimd_mul32_c16)
#define DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const float** input, \
size_t input_offset, \
const float* weights, \
float* output, \
size_t input_increment);
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__scalar_p1)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__scalar_p2)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__scalar_p4)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neon_p4)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neon_p8)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neon_p16)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neonfma_p4)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neonfma_p8)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__neonfma_p16)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__sse_p4)
DECLARE_F32_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f32_ibilinear_chw_ukernel__sse_p8)
#define DECLARE_F16_IBILINEAR_CHW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t channels, \
const void** input, \
size_t input_offset, \
const void* weights, \
void* output, \
size_t input_increment);
DECLARE_F16_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p4)
DECLARE_F16_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p8)
DECLARE_F16_IBILINEAR_CHW_UKERNEL_FUNCTION(xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p16)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/indirection.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <xnnpack.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
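// Descriptive note: these initializers fill an operator's indirection
// buffer, i.e. an array of pointers to input pixels (or to a zero buffer at
// padded positions) that micro-kernels dereference instead of recomputing
// addresses from strides.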
XNN_INTERNAL void xnn_indirection_init_conv2d(
xnn_operator_t op,
size_t output_tile_size,
uint32_t log2_element_size);
XNN_INTERNAL void xnn_indirection_init_dwconv2d(
xnn_operator_t op,
size_t step_height,
size_t step_width,
size_t primary_tile,
uint32_t log2_element_size);
XNN_INTERNAL void xnn_indirection_init_deconv2d(
xnn_operator_t op,
size_t output_tile_size,
uint32_t log2_element_size);
XNN_INTERNAL void xnn_indirection_init_subconv2d(
xnn_operator_t op,
size_t output_tile_size,
uint32_t log2_element_size);
XNN_INTERNAL void xnn_indirection_init_maxpool2d(
xnn_operator_t op,
size_t step_height,
size_t step_width,
uint32_t log2_element_size);
typedef void (*xnn_indirection_init_resize_bilinear2d_hwc_fn)(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
void* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_resize_bilinear2d_hwc_f16(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
void* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_resize_bilinear2d_hwc_f32(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
float* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_resize_bilinear2d_hwc_q11(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
int16_t* packed_weights,
bool align_corners,
bool tensorflow_legacy);
typedef void (*xnn_indirection_init_resize_bilinear2d_chw_fn)(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
void* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_resize_bilinear2d_chw_f16(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
void* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_resize_bilinear2d_chw_f32(
size_t input_pixel_stride,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
const void* input,
const void** indirection_buffer,
float* packed_weights,
bool align_corners,
bool tensorflow_legacy);
XNN_INTERNAL void xnn_indirection_init_unpool2d(
xnn_operator_t op,
size_t batch_start,
uint32_t log2_element_size);
typedef void (*xnn_indirection_init_pavgpool2d_fn)(
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
size_t pooling_height,
size_t pooling_width,
size_t stride_height,
size_t stride_width,
size_t padding_top,
size_t padding_left,
void* pixelwise_buffer);
XNN_INTERNAL void xnn_indirection_init_pavgpool2d_f16(
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
size_t pooling_height,
size_t pooling_width,
size_t stride_height,
size_t stride_width,
size_t padding_top,
size_t padding_left,
uint16_t* pixelwise_buffer);
XNN_INTERNAL void xnn_indirection_init_pavgpool2d_f32(
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
size_t pooling_height,
size_t pooling_width,
size_t stride_height,
size_t stride_width,
size_t padding_top,
size_t padding_left,
float* pixelwise_buffer);
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/isa-checks.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#if XNN_ARCH_X86
#define TEST_REQUIRES_X86_SSE \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_SSE
#endif
#if XNN_ARCH_X86
#define TEST_REQUIRES_X86_SSE2 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_SSE2
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_SSSE3 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_ssse3) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_SSSE3
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_SSE41 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_sse4_1) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_SSE41
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_AVX \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_avx) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_AVX
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_F16C \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_f16c) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_F16C
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_XOP \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_xop) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_XOP
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_FMA3 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_fma3) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_FMA3
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_AVX2 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_avx2) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_AVX2
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_AVX512F \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_avx512f) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_AVX512F
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_AVX512SKX \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_avx512skx) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_AVX512SKX
#endif
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
#define TEST_REQUIRES_X86_AVX512VBMI \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_x86_avx512vbmi) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_X86_AVX512VBMI
#endif
#if XNN_ARCH_ARM
#define TEST_REQUIRES_ARM_SIMD32 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_v6) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_SIMD32
#endif
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
#define TEST_REQUIRES_ARM_FP16_ARITH \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_fp16_arith) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_FP16_ARITH
#endif
#if XNN_ARCH_ARM
#define TEST_REQUIRES_ARM_NEON \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON
#endif
#if XNN_ARCH_ARM
#define TEST_REQUIRES_ARM_NEON_FP16 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_fp16) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_FP16
#endif
#if XNN_ARCH_ARM
#define TEST_REQUIRES_ARM_NEON_FMA \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_fma) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_FMA
#endif
#if XNN_ARCH_ARM
#define TEST_REQUIRES_ARM_NEON_V8 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_v8) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_V8
#endif
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
#define TEST_REQUIRES_ARM_NEON_FP16_ARITH \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_fp16_arith) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_FP16_ARITH
#endif
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
#define TEST_REQUIRES_ARM_NEON_BF16 \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_bf16) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_BF16
#endif
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
#define TEST_REQUIRES_ARM_NEON_DOT \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_arm_neon_dot) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_ARM_NEON_DOT
#endif
#if XNN_ARCH_RISCV
#define TEST_REQUIRES_RISCV_VECTOR \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_riscv_vector) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_RISCV_VECTOR
#endif
#if XNN_ARCH_WASMRELAXEDSIMD
#define TEST_REQUIRES_WASM_PSHUFB \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_wasm_pshufb) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_WASM_PSHUFB
#endif
#if XNN_ARCH_WASMRELAXEDSIMD
#define TEST_REQUIRES_WASM_SDOT \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_wasm_sdot) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_WASM_SDOT
#endif
#if XNN_ARCH_WASMRELAXEDSIMD
#define TEST_REQUIRES_WASM_BLENDVPS \
do { \
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); \
if (hardware_config == nullptr || !hardware_config->use_wasm_blendvps) { \
GTEST_SKIP(); \
} \
} while (0)
#else
#define TEST_REQUIRES_WASM_BLENDVPS
#endif
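// Typical use at the top of a googletest test body (illustrative test name):
// TEST(F32_GEMM, 1x8__avx2) {
// TEST_REQUIRES_X86_AVX2; // GTEST_SKIP()s when AVX2 is unavailable
// ...
// }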
// ===== XNNPACK-master/src/xnnpack/log.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/node-type.h>
#ifndef XNN_LOG_LEVEL
#error "Undefined XNN_LOG_LEVEL"
#endif
#define XNN_LOG_NONE 0
#define XNN_LOG_FATAL 1
#define XNN_LOG_ERROR 2
#define XNN_LOG_WARNING 3
#define XNN_LOG_INFO 4
#define XNN_LOG_DEBUG 5
#ifdef __cplusplus
extern "C" {
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_DEBUG
void xnn_vlog_debug(const char* format, va_list args);
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_INFO
void xnn_vlog_info(const char* format, va_list args);
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_WARNING
void xnn_vlog_warning(const char* format, va_list args);
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_ERROR
void xnn_vlog_error(const char* format, va_list args);
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_FATAL
void xnn_vlog_fatal(const char* format, va_list args);
#endif
#if XNN_LOG_LEVEL == XNN_LOG_NONE
inline static const char* xnn_datatype_to_string(enum xnn_datatype type) {
return "Unknown";
}
inline static const char* xnn_node_type_to_string(enum xnn_node_type type) {
return "Unknown";
}
#else
const char* xnn_datatype_to_string(enum xnn_datatype type);
const char* xnn_node_type_to_string(enum xnn_node_type type);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#ifndef XNN_LOG_ARGUMENTS_FORMAT
#ifdef __GNUC__
#define XNN_LOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
#else
#define XNN_LOG_ARGUMENTS_FORMAT
#endif
#endif
XNN_LOG_ARGUMENTS_FORMAT inline static void xnn_log_debug(const char* format, ...) {
#if XNN_LOG_LEVEL >= XNN_LOG_DEBUG
va_list args;
va_start(args, format);
xnn_vlog_debug(format, args);
va_end(args);
#endif
}
XNN_LOG_ARGUMENTS_FORMAT inline static void xnn_log_info(const char* format, ...) {
#if XNN_LOG_LEVEL >= XNN_LOG_INFO
va_list args;
va_start(args, format);
xnn_vlog_info(format, args);
va_end(args);
#endif
}
XNN_LOG_ARGUMENTS_FORMAT inline static void xnn_log_warning(const char* format, ...) {
#if XNN_LOG_LEVEL >= XNN_LOG_WARNING
va_list args;
va_start(args, format);
xnn_vlog_warning(format, args);
va_end(args);
#endif
}
XNN_LOG_ARGUMENTS_FORMAT inline static void xnn_log_error(const char* format, ...) {
#if XNN_LOG_LEVEL >= XNN_LOG_ERROR
va_list args;
va_start(args, format);
xnn_vlog_error(format, args);
va_end(args);
#endif
}
XNN_LOG_ARGUMENTS_FORMAT inline static void xnn_log_fatal(const char* format, ...) {
#if XNN_LOG_LEVEL >= XNN_LOG_FATAL
va_list args;
va_start(args, format);
xnn_vlog_fatal(format, args);
va_end(args);
#endif
abort();
}
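// Usage (illustrative): xnn_log_error("failed to create %s operator", name);
// Calls below the configured XNN_LOG_LEVEL compile to empty bodies, except
// xnn_log_fatal(), which always calls abort() even when the message itself
// is compiled out.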
// ===== XNNPACK-master/src/xnnpack/lut.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_X8_LUT_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t n, \
const uint8_t* x, \
uint8_t* y, \
const uint8_t* t);
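// Semantics (descriptive sketch): for each of the n input bytes,
// y[i] = t[x[i]]
// where t is a 256-entry byte lookup table.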
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__scalar_x1)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__scalar_x2)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__scalar_x4)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__scalar_x8)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__scalar_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x48)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__ssse3_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__ssse3_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx_x48)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx2_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx2_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx2_x96)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx2_x128)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512skx_vpshufb_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512skx_vpshufb_x128)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512skx_vpshufb_x192)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512skx_vpshufb_x256)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x128)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x192)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x256)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmpshufb_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmpshufb_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmpshufb_x48)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmpshufb_x64)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmsimd_x16)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmsimd_x32)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmsimd_x48)
DECLARE_X8_LUT_UKERNEL_FUNCTION(xnn_x8_lut_ukernel__wasmsimd_x64)
#define DECLARE_U8_LUT32NORM_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t n, \
const uint8_t* x, \
const uint32_t* t, \
uint8_t* y);
DECLARE_U8_LUT32NORM_UKERNEL_FUNCTION(xnn_u8_lut32norm_ukernel__scalar)
#ifdef __cplusplus
} // extern "C"
#endif
// ===== XNNPACK-master/src/xnnpack/math.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#include <stdlib.h> // For _rotl.
#endif
#include <xnnpack/common.h>
// stdlib.h from Windows 10 SDK defines min & max macros.
// Undefine them before defining the corresponding functions.
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
XNN_INLINE static size_t min(size_t a, size_t b) {
return XNN_UNPREDICTABLE(b < a) ? b : a;
}
XNN_INLINE static size_t max(size_t a, size_t b) {
return XNN_UNPREDICTABLE(b < a) ? a : b;
}
XNN_INLINE static size_t doz(size_t a, size_t b) {
return XNN_UNPREDICTABLE(b < a) ? a - b : 0;
}
XNN_INLINE static size_t divide_round_up(size_t n, size_t q) {
return XNN_UNPREDICTABLE(n % q == 0) ? n / q : n / q + 1;
}
XNN_INLINE static size_t round_up(size_t n, size_t q) {
return divide_round_up(n, q) * q;
}
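// Example: divide_round_up(10, 4) == 3, so round_up(10, 4) == 12.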
XNN_INLINE static bool is_po2(size_t n) {
return (n != 0) && ((n & (n - 1)) == 0);
}
XNN_INLINE static size_t round_down_po2(size_t n, size_t q) {
assert(is_po2(q));
return n & -q;
}
XNN_INLINE static size_t round_up_po2(size_t n, size_t q) {
return round_down_po2(n + q - 1, q);
}
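// Example (q must be a power of two): round_down_po2(20, 8) == 16 via the
// mask n & -q, so round_up_po2(13, 8) == round_down_po2(13 + 7, 8) == 16.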
XNN_INLINE static size_t mod_po2(size_t n, size_t m) {
assert(is_po2(m));
return n & (m - 1);
}
XNN_INLINE static size_t subtract_modulo(size_t a, size_t b, size_t m) {
assert(a < m);
assert(b < m);
return XNN_UNPREDICTABLE(a >= b) ? a - b : a - b + m;
}
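// Example: subtract_modulo(1, 3, 5) == 1 - 3 + 5 == 3, i.e. (a - b) mod m
// for a and b already reduced modulo m.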
XNN_INLINE static float uint32_as_float(uint32_t i) {
union {
uint32_t as_uint32;
float as_float;
} bits = { i };
return bits.as_float;
}
XNN_INLINE static uint32_t float_as_uint32(float f) {
union {
float as_float;
uint32_t as_uint32;
} bits = { f };
return bits.as_uint32;
}
XNN_INLINE static double uint64_as_double(uint64_t i) {
union {
uint64_t as_uint64;
double as_double;
} bits = { i };
return bits.as_double;
}
XNN_INLINE static uint64_t double_as_uint64(double f) {
union {
double as_double;
uint64_t as_uint64;
} bits = { f };
return bits.as_uint64;
}
XNN_INLINE static uint32_t math_abs_s32(int32_t n) {
#if defined(_MSC_VER)
return (uint32_t) abs((int) n);
#else
return XNN_UNPREDICTABLE(n >= 0) ? (uint32_t) n : -(uint32_t) n;
#endif
}
XNN_INLINE static int32_t math_min_s32(int32_t a, int32_t b) {
return XNN_UNPREDICTABLE(a < b) ? a : b;
}
XNN_INLINE static int32_t math_max_s32(int32_t a, int32_t b) {
return XNN_UNPREDICTABLE(a > b) ? a : b;
}
XNN_INLINE static uint32_t math_min_u32(uint32_t a, uint32_t b) {
return XNN_UNPREDICTABLE(a < b) ? a : b;
}
XNN_INLINE static uint32_t math_max_u32(uint32_t a, uint32_t b) {
return XNN_UNPREDICTABLE(a > b) ? a : b;
}
XNN_INLINE static uint32_t math_doz_u32(uint32_t a, uint32_t b) {
return XNN_UNPREDICTABLE(a > b) ? a - b : 0;
}
XNN_INLINE static int64_t math_mulext_s32(int32_t a, int32_t b) {
#if defined(_MSC_VER) && defined(_M_IX86)
return (int64_t) __emul((int) a, (int) b);
#else
return (int64_t) a * (int64_t) b;
#endif
}
XNN_INLINE static uint64_t math_mulext_u32(uint32_t a, uint32_t b) {
#if defined(_MSC_VER) && defined(_M_IX86)
return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
#else
return (uint64_t) a * (uint64_t) b;
#endif
}
XNN_INLINE static float math_muladd_f32(float x, float y, float acc) {
#if defined(__GNUC__) && defined(__FP_FAST_FMAF)
return __builtin_fmaf(x, y, acc);
#elif defined(__clang__) && defined(__riscv)
return __builtin_fmaf(x, y, acc);
#else
return x * y + acc;
#endif
}
XNN_INLINE static float math_pmin_f32(float a, float b) {
return XNN_UNPREDICTABLE(b < a) ? b : a;
}
XNN_INLINE static float math_pmax_f32(float a, float b) {
return XNN_UNPREDICTABLE(b < a) ? a : b;
}
XNN_INLINE static double math_pmin_f64(double a, double b) {
return XNN_UNPREDICTABLE(b < a) ? b : a;
}
XNN_INLINE static double math_pmax_f64(double a, double b) {
return XNN_UNPREDICTABLE(b < a) ? a : b;
}
XNN_INLINE static float math_min_f32(float a, float b) {
#if defined(__GNUC__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
return __builtin_fminf(a, b);
#elif defined(__clang__) && defined(__riscv)
return __builtin_fminf(a, b);
#else
return XNN_UNPREDICTABLE(b < a) ? b : a;
#endif
}
XNN_INLINE static float math_max_f32(float a, float b) {
#if defined(__GNUC__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
return __builtin_fmaxf(a, b);
#elif defined(__clang__) && defined(__riscv)
return __builtin_fmaxf(a, b);
#else
return XNN_UNPREDICTABLE(b < a) ? a : b;
#endif
}
XNN_INLINE static double math_min_f64(double a, double b) {
#if defined(__GNUC__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
return __builtin_fmin(a, b);
#elif defined(__clang__) && defined(__riscv)
return __builtin_fmin(a, b);
#else
return XNN_UNPREDICTABLE(b < a) ? b : a;
#endif
}
XNN_INLINE static double math_max_f64(double a, double b) {
#if defined(__GNUC__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
return __builtin_fmax(a, b);
#elif defined(__clang__) && defined(__riscv)
return __builtin_fmax(a, b);
#else
return XNN_UNPREDICTABLE(b < a) ? a : b;
#endif
}
XNN_INLINE static float math_nonsign_mask_f32() {
#if defined(__INTEL_COMPILER)
// Surprisingly, Intel compiler ignores __builtin_nanf payload
return _castu32_f32(0x7FFFFFFF);
#elif defined(__GNUC__)
return __builtin_nanf("0x7FFFFF");
#else
union {
uint32_t as_word;
float as_float;
} f;
f.as_word = 0x7FFFFFFF;
return f.as_float;
#endif
}
#if defined(__clang__)
#if __clang_major__ == 3 && __clang_minor__ >= 7 || __clang_major__ > 3
#define XNN_IGNORE_SHIFT_BASE_UB __attribute__((__no_sanitize__("shift-base")))
#else
#define XNN_IGNORE_SHIFT_BASE_UB
#endif
#elif defined(__GNUC__)
#if __GNUC__ >= 8
#define XNN_IGNORE_SHIFT_BASE_UB __attribute__((__no_sanitize__("shift-base")))
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 || __GNUC__ > 4
// 4.9 <= gcc < 8 support ubsan, but doesn't support no_sanitize attribute
#define XNN_IGNORE_SHIFT_BASE_UB
#ifndef XNN_USE_SHIFT_BASE_UB_WORKAROUND
#define XNN_USE_SHIFT_BASE_UB_WORKAROUND 1
#endif
#else
#define XNN_IGNORE_SHIFT_BASE_UB
#endif
#else
#define XNN_IGNORE_SHIFT_BASE_UB
#endif
XNN_IGNORE_SHIFT_BASE_UB
XNN_INLINE static int32_t math_asr_s32(int32_t x, uint32_t n) {
#ifdef XNN_USE_SHIFT_BASE_UB_WORKAROUND
#if XNN_ARCH_X86_64 || XNN_ARCH_ARM64
return (int32_t) ((uint64_t) (int64_t) x >> n);
#else
return x >= 0 ? x >> n : ~(~x >> n);
#endif
#else
return x >> n;
#endif
}
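// The workaround computes an arithmetic (sign-filling) right shift without
// shifting a negative value directly: for x < 0, ~(~x >> n) == x >> n, e.g.
// math_asr_s32(-5, 1) == ~(~(-5) >> 1) == ~(4 >> 1) == -3 == floor(-5 / 2).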
XNN_IGNORE_SHIFT_BASE_UB
XNN_INLINE static int64_t math_asr_s64(int64_t x, uint32_t n) {
#ifdef XNN_USE_SHIFT_BASE_UB_WORKAROUND
return x >= 0 ? x >> n : ~(~x >> n);
#else
return x >> n;
#endif
}
XNN_INLINE static uint32_t math_clz_u32(uint32_t x) {
#if defined(_MSC_VER) && !defined(__clang__)
unsigned long index;
if XNN_UNPREDICTABLE(_BitScanReverse(&index, (unsigned long) x) != 0) {
return (uint32_t) index ^ 31;
} else {
return 32;
}
#else
if XNN_UNPREDICTABLE(x == 0) {
return 32;
} else {
return (uint32_t) __builtin_clz((unsigned int) x);
}
#endif
}
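// _BitScanReverse returns the index of the highest set bit, so index ^ 31
// converts it to a leading-zero count; e.g. math_clz_u32(1) == 31,
// math_clz_u32(0x80000000) == 0, and math_clz_u32(0) == 32 by convention.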
XNN_INLINE static uint32_t math_clz_nonzero_u32(uint32_t x) {
assert(x != 0);
#if defined(_MSC_VER) && !defined(__clang__)
unsigned long index;
_BitScanReverse(&index, (unsigned long) x);
return (uint32_t) index ^ 31;
#else
return (uint32_t) __builtin_clz((unsigned int) x);
#endif
}
XNN_INLINE static uint32_t math_ctz_u32(uint32_t x) {
#if defined(_MSC_VER) && !defined(__clang__)
unsigned long index;
_BitScanForward(&index, (unsigned long) x);
return (uint32_t) index;
#else
return (uint32_t) __builtin_ctz((unsigned int) x);
#endif
}
XNN_INLINE static uint32_t math_rotl_u32(uint32_t x, int8_t r)
{
#if XNN_COMPILER_MSVC
return _rotl((unsigned int) x, (int) r);
#else
return (x << r) | (x >> (32 - r));
#endif
}
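// Note: the portable fallback assumes 0 < r < 32; r == 0 would make the
// second operand shift by 32 bits, which is undefined behavior in C.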
#ifndef __cplusplus
XNN_INLINE static uint32_t math_cvt_sat_u32_f64(double x) {
#if defined(__GNUC__) && defined(__arm__)
uint32_t i;
__asm__ ("vcvt.u32.f64 %[i], %P[x]"
: [i] "=t" (i)
: [x] "w" (x));
return i;
#elif defined(__GNUC__) && defined(__aarch64__)
uint32_t i;
__asm__ ("fcvtnu %w[i], %d[x]"
: [i] "=r" (i)
: [x] "w" (x));
return i;
#elif defined(__GNUC__) && defined(__riscv)
uint32_t i;
__asm__ ("fcvt.wu.d %[i], %[x], rne"
: [i] "=r" (i)
: [x] "f" (x));
return i;
#elif defined(__clang__) && defined(__wasm__) && defined(__wasm_nontrapping_fptoint__)
return __builtin_wasm_trunc_saturate_u_i32_f64(rint(x));
#else
x = math_max_f64(x, 0.0);
x = math_min_f64(x, 4294967295.0);
return (uint32_t) double_as_uint64(x + 0x1.0p+52);
#endif
}
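// The portable fallback clamps x to [0.0, 4294967295.0] and adds 0x1.0p+52:
// after the add, the integer value occupies the low mantissa bits of the
// double (rounded to nearest even), so truncating the bit pattern to
// uint32_t yields a saturating round-to-nearest conversion.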
#endif
// ===== XNNPACK-master/src/xnnpack/maxpool.h =====
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_F16_MAXPOOL_MINMAX_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_size, \
size_t channels, \
const void** input, \
size_t input_offset, \
void* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f16_minmax_params* params);
DECLARE_F16_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f16_maxpool_minmax_ukernel_9p8x__f16c_c8)
DECLARE_F16_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f16_maxpool_minmax_ukernel_9p8x__neonfp16arith_c8)
#define DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_size, \
size_t channels, \
const float** input, \
size_t input_offset, \
float* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_f32_minmax_params* params);
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__neon_c4)
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1)
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__sse_c4)
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__wasm_c1)
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_arm_c4)
DECLARE_F32_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_x86_c4)
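// Illustrative only: a plain-C reference of the calling convention these
// declarations imply, under the assumed XNNPACK indirection-buffer scheme
// (per output pixel, `kernel_size` row pointers, each shifted by
// `input_offset` bytes; increments are in bytes). The 9p8x naming suggests
// 9 pointers in a first pass and 8 per subsequent pass; this hypothetical
// sketch is single-pass, with plain floats standing in for the params union.
static inline void f32_maxpool_reference(
    size_t output_pixels,
    size_t kernel_size,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    size_t input_increment,
    size_t output_increment,
    float output_min,
    float output_max)
{
  do {
    for (size_t c = 0; c < channels; c++) {
      float vmax = ((const float*) ((uintptr_t) input[0] + input_offset))[c];
      for (size_t k = 1; k < kernel_size; k++) {
        const float* row = (const float*) ((uintptr_t) input[k] + input_offset);
        if (row[c] > vmax) {
          vmax = row[c];
        }
      }
      vmax = vmax > output_max ? output_max : vmax;  // clamp to [min, max]
      vmax = vmax < output_min ? output_min : vmax;
      output[c] = vmax;
    }
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}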
#define DECLARE_U8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_size, \
size_t channels, \
const uint8_t** input, \
size_t input_offset, \
uint8_t* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_u8_minmax_params* params);
DECLARE_U8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16)
DECLARE_U8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1)
DECLARE_U8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_u8_maxpool_minmax_ukernel_9p8x__sse2_c16)
DECLARE_U8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_u8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16)
#define DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t output_pixels, \
size_t kernel_size, \
size_t channels, \
const int8_t** input, \
size_t input_offset, \
int8_t* output, \
size_t input_increment, \
size_t output_increment, \
const union xnn_s8_minmax_params* params);
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_9p8x__neon_c16)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_9p8x__sse2_c16)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_9p8x__sse41_c16)
DECLARE_S8_MAXPOOL_MINMAX_UKERNEL_FUNCTION(xnn_s8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16)
#ifdef __cplusplus
} // extern "C"
#endif
// File: XNNPACK-master/src/xnnpack/memory-planner.h
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/allocator.h>
#ifdef __cplusplus
extern "C" {
#endif
struct xnn_usage_record {
// The index (to xnn_runtime_t->opdata) of the first xnn_node that uses this xnn_value.
uint32_t first_node;
// The index of the last xnn_node that uses this xnn_value.
uint32_t last_node;
// Note that 'tensor_size' includes the padding of XNN_EXTRA_BYTES.
size_t tensor_size;
// The memory offset of this xnn_value from the beginning of a memory buffer.
size_t alloc_offset;
// If an operation is performed in place, the alloc_offset of the output tensor is the same as the alloc_offset of the
// input tensor. The id of the input tensor is recorded in this field. This is XNN_INVALID_VALUE_ID if it does not
// reuse any tensor.
uint32_t reuse_value_id;
  // This usage record is not tied to an actual value, but to a temporary associated with an opdata, like a dynamic fully
  // connected operation. We need the opdata's id to look up and initialize the opdata's pointers.
uint32_t opdata_id;
};
// Track the memory allocation in a memory arena for a runtime.
struct xnn_value_allocation_tracker {
size_t mem_arena_size;
// Representing the lifecycle of xnn_values in the 'runtime', and the array size is 'runtime->num_values +
// runtime->num_ops'.
struct xnn_usage_record* usage;
// The range of value ids (i.e. the index to runtime->values) whose memory might need to be allocated.
size_t min_value_id;
size_t max_value_id;
};
// Initialize the memory allocation tracker for xnn_values.
XNN_INTERNAL void xnn_init_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker,
const struct xnn_runtime* runtime);
inline static void xnn_release_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker) {
xnn_release_memory(tracker->usage);
}
// Add a to-be-allocated xnn_value (referred by 'value_id') of size 'tensor_size' to the allocation tracker.
// Note: this function assumes 'value_id's are added in increasing order for simplicity, as it's called inside a loop
// iterating over 'runtime->values'.
XNN_INTERNAL void xnn_add_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker,
uint32_t value_id, size_t tensor_size);
// Add a value to represent operator workspace. This is a temporary buffer that is only used during the invocation of
// the operator.
XNN_INTERNAL void xnn_add_operator_workspace_allocation_tracker(
struct xnn_value_allocation_tracker* tracker,
uint32_t operator_workspace_value_id,
size_t tensor_size,
uint32_t opdata_id);
// Mark value_id as reusing the memory that is allocated to another reuse_value_id. No memory is then
// allocated to value_id. The usage record of reuse_value_id needs to be expanded to include all consumers of value_id,
// indicated by new_last_node.
XNN_INTERNAL void xnn_mark_tensor_as_reuse(
struct xnn_value_allocation_tracker* tracker,
uint32_t value_id,
uint32_t reuse_value_id,
uint32_t new_last_node);
// Plan the exact the memory allocation for intermediate tensors according to the xnn_value allocation tracker.
XNN_INTERNAL void xnn_plan_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker);
#ifdef __cplusplus
} // extern "C"
#endif
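// A toy sketch of interval-based planning in the spirit of the tracker above:
// greedy first-fit places each tensor at the lowest offset that does not
// collide with any already-placed tensor whose [first_node, last_node] live
// range overlaps. Illustrative only; this is not XNNPACK's actual algorithm.
struct toy_usage_record {
  uint32_t first_node;
  uint32_t last_node;
  size_t tensor_size;
  size_t alloc_offset;
};

static inline size_t toy_plan_offsets(struct toy_usage_record* recs, size_t n) {
  size_t arena_size = 0;
  for (size_t i = 0; i < n; i++) {
    size_t offset = 0;
    bool moved = true;
    while (moved) {
      moved = false;
      for (size_t j = 0; j < i; j++) {
        const bool lives_overlap = recs[i].first_node <= recs[j].last_node &&
                                   recs[j].first_node <= recs[i].last_node;
        const bool bytes_overlap =
            offset < recs[j].alloc_offset + recs[j].tensor_size &&
            recs[j].alloc_offset < offset + recs[i].tensor_size;
        if (lives_overlap && bytes_overlap) {
          offset = recs[j].alloc_offset + recs[j].tensor_size;  // bump past conflict
          moved = true;
        }
      }
    }
    recs[i].alloc_offset = offset;
    if (offset + recs[i].tensor_size > arena_size) {
      arena_size = offset + recs[i].tensor_size;  // analogous to mem_arena_size
    }
  }
  return arena_size;
}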
// File: XNNPACK-master/src/xnnpack/microkernel-type.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
//
// Auto-generated file. Do not edit!
// Specification: src/enums/microkernel-type.yaml
// Generator: tools/generate-enum.py
#pragma once
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
enum xnn_microkernel_type {
xnn_microkernel_type_default = 0,
xnn_microkernel_type_average_pooling,
xnn_microkernel_type_conv2d_hwc2chw,
xnn_microkernel_type_dwconv,
xnn_microkernel_type_gemm,
xnn_microkernel_type_global_average_pooling,
xnn_microkernel_type_igemm,
xnn_microkernel_type_mean,
xnn_microkernel_type_pixelwise_average_pooling,
xnn_microkernel_type_spmm,
xnn_microkernel_type_subconv2d,
xnn_microkernel_type_transpose,
xnn_microkernel_type_vmulcaddc,
};
XNN_INTERNAL const char* xnn_microkernel_type_to_string(enum xnn_microkernel_type microkernel_type);
#ifdef __cplusplus
} // extern "C"
#endif
// File: XNNPACK-master/src/xnnpack/microkernel-utils.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
// The total tile size needed to cover kernel_size.
XNN_INTERNAL size_t xnn_dwconv_multipass_tile_size(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile);
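// Illustrative sketch of one plausible computation (the exact rounding used by
// the real helper may differ): a multipass dwconv covers first_pass_tile taps,
// then whole middle_pass_tile-sized passes, then last_pass_tile taps.
static inline size_t toy_multipass_tile_size(
  size_t kernel_size,
  size_t first_pass_tile,
  size_t middle_pass_tile,
  size_t last_pass_tile)
{
  if (kernel_size <= first_pass_tile) {
    return first_pass_tile;  // a single pass covers the whole kernel
  }
  const size_t taps_after_first = kernel_size - first_pass_tile;
  if (taps_after_first <= last_pass_tile) {
    return first_pass_tile + last_pass_tile;
  }
  const size_t remainder = taps_after_first - last_pass_tile;
  const size_t middle_passes =
      (remainder + middle_pass_tile - 1) / middle_pass_tile;  // round up
  return first_pass_tile + middle_passes * middle_pass_tile + last_pass_tile;
}
// e.g. kernel_size = 25 with tiles (8, 8, 9): 8 + 1 * 8 + 9 = 25.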
// The total count of weights (in bytes) needed for multipass dwconv.
size_t xnn_dwconv_multipass_weights_size(
size_t tile_size,
size_t channels,
size_t channel_tile,
size_t channel_subtile,
size_t channel_round,
size_t bias_element_size,
size_t log2_filter_element_size,
size_t extra_weights_byte);
// Calculate the number of bytes read.
size_t xnn_dwconv_multipass_bytes_read(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile,
size_t channels,
size_t channel_tile,
size_t channel_subtile,
size_t channel_round,
size_t log2_input_size,
size_t log2_filter_size,
size_t bias_element_size,
size_t log2_accumulator_size);
// Calculate the number of bytes written.
size_t xnn_dwconv_multipass_bytes_written(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile,
size_t channels,
size_t channel_round,
size_t log2_accumulator_size,
size_t log2_output_size);
#ifdef __cplusplus
}
#endif
// File: XNNPACK-master/src/xnnpack/models.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <xnnpack.h>
#include <memory>
#include <vector>
namespace models {
typedef std::vector<std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)>> ExecutionPlan;
typedef ExecutionPlan (*ExecutionPlanFactory)(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV1(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV2(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV3Large(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV3Small(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV1Jit(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV2Jit(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV3LargeJit(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV3SmallJit(pthreadpool_t threadpool);
ExecutionPlan FP32MobileNetV3SmallFused(pthreadpool_t threadpool);
ExecutionPlan FP32SparseMobileNetV1(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP32SparseMobileNetV2(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP32SparseMobileNetV3Large(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP32SparseMobileNetV3Small(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP16MobileNetV1(pthreadpool_t threadpool);
ExecutionPlan FP16MobileNetV2(pthreadpool_t threadpool);
ExecutionPlan FP16MobileNetV3Large(pthreadpool_t threadpool);
ExecutionPlan FP16MobileNetV3Small(pthreadpool_t threadpool);
ExecutionPlan FP16SparseMobileNetV1(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP16SparseMobileNetV2(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP16SparseMobileNetV3Large(float sparsity, pthreadpool_t threadpool);
ExecutionPlan FP16SparseMobileNetV3Small(float sparsity, pthreadpool_t threadpool);
ExecutionPlan QC8MobileNetV1(pthreadpool_t threadpool);
ExecutionPlan QC8MobileNetV2(pthreadpool_t threadpool);
ExecutionPlan QS8MobileNetV1(pthreadpool_t threadpool);
ExecutionPlan QS8MobileNetV2(pthreadpool_t threadpool);
ExecutionPlan QU8MobileNetV1(pthreadpool_t threadpool);
ExecutionPlan QU8MobileNetV2(pthreadpool_t threadpool);
} // namespace models
// File: XNNPACK-master/src/xnnpack/mutex.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <xnnpack.h>
#include <xnnpack/common.h>
#if XNN_PLATFORM_WINDOWS
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
#include <dispatch/dispatch.h>
#else
#include <pthread.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
struct xnn_mutex {
#if XNN_PLATFORM_WINDOWS
HANDLE handle;
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
dispatch_semaphore_t semaphore;
#elif XNN_PLATFORM_WEB && !defined(__EMSCRIPTEN_PTHREADS__)
char _; // Dummy member variable to comply with the C standard
#else
pthread_mutex_t mutex;
#endif
};
enum xnn_status xnn_mutex_init(struct xnn_mutex* mutex);
enum xnn_status xnn_mutex_lock(struct xnn_mutex* mutex);
enum xnn_status xnn_mutex_unlock(struct xnn_mutex* mutex);
enum xnn_status xnn_mutex_destroy(struct xnn_mutex* mutex);
#ifdef __cplusplus
} // extern "C"
#endif
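// A minimal usage sketch of the portable mutex API declared above (error
// handling abbreviated; assumes this header's declarations are in scope):
static inline void mutex_example(struct xnn_mutex* m, int* counter) {
  if (xnn_mutex_lock(m) == xnn_status_success) {
    (*counter)++;  // critical section, guarded on all supported platforms
    xnn_mutex_unlock(m);
  }
}
// Typical lifecycle: xnn_mutex_init(&m); ... mutex_example(&m, &n); ...
// xnn_mutex_destroy(&m);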
// File: XNNPACK-master/src/xnnpack/node-type.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
enum xnn_node_type {
xnn_node_type_invalid = 0,
xnn_node_type_abs,
xnn_node_type_add2,
xnn_node_type_argmax_pooling_2d,
xnn_node_type_average_pooling_2d,
xnn_node_type_bankers_rounding,
xnn_node_type_batch_matrix_multiply,
xnn_node_type_ceiling,
xnn_node_type_clamp,
xnn_node_type_concatenate2,
xnn_node_type_concatenate3,
xnn_node_type_concatenate4,
xnn_node_type_convert,
xnn_node_type_convolution_2d,
xnn_node_type_copy,
xnn_node_type_deconvolution_2d,
xnn_node_type_depth_to_space,
xnn_node_type_depthwise_convolution_2d,
xnn_node_type_divide,
xnn_node_type_elu,
xnn_node_type_even_split2,
xnn_node_type_even_split3,
xnn_node_type_even_split4,
xnn_node_type_floor,
xnn_node_type_fully_connected,
xnn_node_type_fully_connected_sparse,
xnn_node_type_global_average_pooling_1d,
xnn_node_type_global_average_pooling_2d,
xnn_node_type_global_sum_pooling_1d,
xnn_node_type_global_sum_pooling_2d,
xnn_node_type_hardswish,
xnn_node_type_leaky_relu,
xnn_node_type_max_pooling_2d,
xnn_node_type_maximum2,
xnn_node_type_minimum2,
xnn_node_type_multiply2,
xnn_node_type_negate,
xnn_node_type_prelu,
xnn_node_type_rope,
xnn_node_type_sigmoid,
xnn_node_type_softmax,
xnn_node_type_space_to_depth_2d,
xnn_node_type_square,
xnn_node_type_square_root,
xnn_node_type_squared_difference,
xnn_node_type_static_constant_pad,
xnn_node_type_static_mean,
xnn_node_type_static_reshape,
xnn_node_type_static_resize_bilinear_2d,
xnn_node_type_static_slice,
xnn_node_type_static_transpose,
xnn_node_type_subtract,
xnn_node_type_tanh,
xnn_node_type_unpooling_2d,
};
// File: XNNPACK-master/src/xnnpack/normalization.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <xnnpack.h>
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
// Calculates normalized offsets, input_shape, and output_shape.
// Each value in offsets must be less than the corresponding dimension of input_shape.
// Each value in sizes must be > 0 and less than or equal to the corresponding dimension of input_shape.
// This function merges dimensions that are full slices into the outermost dimension possible.
// E.g. Given input shape { 4, 5, 3 }, with offsets { 0, 2, 0 }, and sizes { 4, 1, 3 }, the innermost dimension is a
// full slice, and so can be merged with its outer dimension, to give normalized input shape of { 4, 15 },
// output shape { 4, 3 } with offsets { 0, 6 }.
void xnn_normalize_slice(
size_t num_dims,
const size_t offsets[XNN_MIN_ELEMENTS(1)],
const size_t sizes[XNN_MIN_ELEMENTS(1)],
const size_t input_shape[XNN_MIN_ELEMENTS(1)],
size_t normalized_offsets[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_input_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_output_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t* num_normalized_dims);
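// Sketch: exercising xnn_normalize_slice on the worked example from the
// comment above (expected: 2 normalized dims, input shape {4, 15}, output
// shape {4, 3}, offsets {0, 6}).
static inline void normalize_slice_example(void) {
  const size_t offsets[3] = { 0, 2, 0 };
  const size_t sizes[3] = { 4, 1, 3 };
  const size_t input_shape[3] = { 4, 5, 3 };
  size_t norm_offsets[XNN_MAX_TENSOR_DIMS];
  size_t norm_input_shape[XNN_MAX_TENSOR_DIMS];
  size_t norm_output_shape[XNN_MAX_TENSOR_DIMS];
  size_t num_norm_dims;
  xnn_normalize_slice(3, offsets, sizes, input_shape,
                      norm_offsets, norm_input_shape, norm_output_shape,
                      &num_norm_dims);
}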
void xnn_normalize_transpose_permutation(
size_t num_dims,
size_t element_size,
const size_t* perm,
const size_t* shape,
const size_t* input_stride,
const size_t* output_stride,
size_t* normalized_num_dims,
size_t* normalized_element_size,
size_t* normalized_perm,
size_t* normalized_shape,
size_t* normalized_input_stride,
size_t* normalized_output_stride);
void xnn_normalize_reduction(
size_t* num_reduction_axes_ptr,
size_t* reduction_axes,
size_t* num_input_dims_ptr,
size_t* input_dims);
#ifdef __cplusplus
} // extern "C"
#endif
// File: XNNPACK-master/src/xnnpack/operator-type.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
//
// Auto-generated file. Do not edit!
// Specification: src/enums/operator-type.yaml
// Generator: tools/generate-enum.py
#pragma once
#include <xnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
enum xnn_operator_type {
xnn_operator_type_invalid = 0,
xnn_operator_type_abs_nc_f16,
xnn_operator_type_abs_nc_f32,
xnn_operator_type_add_nd_f16,
xnn_operator_type_add_nd_f32,
xnn_operator_type_add_nd_qs8,
xnn_operator_type_add_nd_qu8,
xnn_operator_type_argmax_pooling_nhwc_f32,
xnn_operator_type_average_pooling_nhwc_f16,
xnn_operator_type_average_pooling_nhwc_f32,
xnn_operator_type_average_pooling_nhwc_qu8,
xnn_operator_type_bankers_rounding_nc_f16,
xnn_operator_type_bankers_rounding_nc_f32,
xnn_operator_type_batch_matrix_multiply_nc_f32,
xnn_operator_type_ceiling_nc_f16,
xnn_operator_type_ceiling_nc_f32,
xnn_operator_type_channel_shuffle_nc_x8,
xnn_operator_type_channel_shuffle_nc_x32,
xnn_operator_type_clamp_nc_f16,
xnn_operator_type_clamp_nc_f32,
xnn_operator_type_clamp_nc_s8,
xnn_operator_type_clamp_nc_u8,
xnn_operator_type_constant_pad_nd_x8,
xnn_operator_type_constant_pad_nd_x16,
xnn_operator_type_constant_pad_nd_x32,
xnn_operator_type_convert_nc_f16_f32,
xnn_operator_type_convert_nc_f32_f16,
xnn_operator_type_convert_nc_f32_qd8,
xnn_operator_type_convert_nc_f32_qs8,
xnn_operator_type_convert_nc_f32_qu8,
xnn_operator_type_convert_nc_qs8,
xnn_operator_type_convert_nc_qs8_f32,
xnn_operator_type_convert_nc_qs16_qs8,
xnn_operator_type_convert_nc_qu8,
xnn_operator_type_convert_nc_qu8_f32,
xnn_operator_type_convolution_nchw_f16,
xnn_operator_type_convolution_nchw_f32,
xnn_operator_type_convolution_nhwc_f16,
xnn_operator_type_convolution_nhwc_f32,
xnn_operator_type_convolution_nhwc_qc8,
xnn_operator_type_convolution_nhwc_qs8,
xnn_operator_type_convolution_nhwc_qu8,
xnn_operator_type_copy_nc_x8,
xnn_operator_type_copy_nc_x16,
xnn_operator_type_copy_nc_x32,
xnn_operator_type_deconvolution_nhwc_f16,
xnn_operator_type_deconvolution_nhwc_f32,
xnn_operator_type_deconvolution_nhwc_qs8,
xnn_operator_type_deconvolution_nhwc_qu8,
xnn_operator_type_depth_to_space_nchw2nhwc_x16,
xnn_operator_type_depth_to_space_nchw2nhwc_x32,
xnn_operator_type_depth_to_space_nhwc_x8,
xnn_operator_type_depth_to_space_nhwc_x16,
xnn_operator_type_depth_to_space_nhwc_x32,
xnn_operator_type_divide_nd_f16,
xnn_operator_type_divide_nd_f32,
xnn_operator_type_dynamic_fully_connected_nc_f16,
xnn_operator_type_dynamic_fully_connected_nc_f32,
xnn_operator_type_elu_nc_f16,
xnn_operator_type_elu_nc_f32,
xnn_operator_type_elu_nc_qs8,
xnn_operator_type_floor_nc_f16,
xnn_operator_type_floor_nc_f32,
xnn_operator_type_fully_connected_nc_f16,
xnn_operator_type_fully_connected_nc_f32,
xnn_operator_type_fully_connected_nc_f32_qc8w,
xnn_operator_type_fully_connected_nc_qs8,
xnn_operator_type_fully_connected_nc_qu8,
xnn_operator_type_global_average_pooling_ncw_f16,
xnn_operator_type_global_average_pooling_ncw_f32,
xnn_operator_type_global_average_pooling_nwc_f16,
xnn_operator_type_global_average_pooling_nwc_f32,
xnn_operator_type_global_average_pooling_nwc_qs8,
xnn_operator_type_global_average_pooling_nwc_qu8,
xnn_operator_type_global_sum_pooling_nwc_f16,
xnn_operator_type_global_sum_pooling_nwc_f32,
xnn_operator_type_hardswish_nc_f16,
xnn_operator_type_hardswish_nc_f32,
xnn_operator_type_leaky_relu_nc_f16,
xnn_operator_type_leaky_relu_nc_f32,
xnn_operator_type_leaky_relu_nc_qs8,
xnn_operator_type_leaky_relu_nc_qu8,
xnn_operator_type_max_pooling_nhwc_f16,
xnn_operator_type_max_pooling_nhwc_f32,
xnn_operator_type_max_pooling_nhwc_s8,
xnn_operator_type_max_pooling_nhwc_u8,
xnn_operator_type_maximum_nd_f16,
xnn_operator_type_maximum_nd_f32,
xnn_operator_type_mean_nd_f16,
xnn_operator_type_mean_nd_f32,
xnn_operator_type_minimum_nd_f16,
xnn_operator_type_minimum_nd_f32,
xnn_operator_type_multiply_nd_f16,
xnn_operator_type_multiply_nd_f32,
xnn_operator_type_multiply_nd_qs8,
xnn_operator_type_multiply_nd_qu8,
xnn_operator_type_negate_nc_f16,
xnn_operator_type_negate_nc_f32,
xnn_operator_type_prelu_nc_f16,
xnn_operator_type_prelu_nc_f32,
xnn_operator_type_resize_bilinear_nchw_f16,
xnn_operator_type_resize_bilinear_nchw_f32,
xnn_operator_type_resize_bilinear_nhwc_f16,
xnn_operator_type_resize_bilinear_nhwc_f32,
xnn_operator_type_resize_bilinear_nhwc_s8,
xnn_operator_type_resize_bilinear_nhwc_u8,
xnn_operator_type_rope_nthc_f32,
xnn_operator_type_sigmoid_nc_f16,
xnn_operator_type_sigmoid_nc_f32,
xnn_operator_type_sigmoid_nc_qs8,
xnn_operator_type_sigmoid_nc_qu8,
xnn_operator_type_slice_nd_x8,
xnn_operator_type_slice_nd_x16,
xnn_operator_type_slice_nd_x32,
xnn_operator_type_softmax_nc_f16,
xnn_operator_type_softmax_nc_f32,
xnn_operator_type_softmax_nc_qu8,
xnn_operator_type_space_to_depth_nhwc_x8,
xnn_operator_type_space_to_depth_nhwc_x16,
xnn_operator_type_space_to_depth_nhwc_x32,
xnn_operator_type_square_nc_f16,
xnn_operator_type_square_nc_f32,
xnn_operator_type_square_root_nc_f16,
xnn_operator_type_square_root_nc_f32,
xnn_operator_type_squared_difference_nd_f16,
xnn_operator_type_squared_difference_nd_f32,
xnn_operator_type_subtract_nd_f16,
xnn_operator_type_subtract_nd_f32,
xnn_operator_type_subtract_nd_qs8,
xnn_operator_type_subtract_nd_qu8,
xnn_operator_type_tanh_nc_f16,
xnn_operator_type_tanh_nc_f32,
xnn_operator_type_tanh_nc_qs8,
xnn_operator_type_tanh_nc_qu8,
xnn_operator_type_transpose_nd_x8,
xnn_operator_type_transpose_nd_x16,
xnn_operator_type_transpose_nd_x32,
xnn_operator_type_truncation_nc_f16,
xnn_operator_type_truncation_nc_f32,
xnn_operator_type_unpooling_nhwc_x32,
};
XNN_INTERNAL const char* xnn_operator_type_to_string(enum xnn_operator_type operator_type);
#ifdef __cplusplus
} // extern "C"
#endif
// File: XNNPACK-master/src/xnnpack/operator-utils.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/common.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#if XNN_PLATFORM_JIT
// Generates code for all mr values up to max_mr.
// Offsets of all generated code will be kept in generated_code_offset.
XNN_INTERNAL void xnn_generate_gemms_up_to_max_mr(
size_t max_mr,
struct gemm_codegens generators,
const struct jit_gemm_params *jit_gemm_params,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
xnn_operator_t convolution_op);
XNN_INTERNAL void xnn_generate_igemms_up_to_max_mr(
size_t max_mr,
struct gemm_codegens generators,
const struct jit_gemm_params *jit_gemm_params,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
size_t kernel_size,
xnn_operator_t convolution_op);
// Overwrite function pointer to GEMM microkernels with generated code if available.
XNN_INTERNAL void xnn_overwrite_gemm_cases_with_generated_code(
xnn_operator_t convolution_op,
struct xnn_hmp_gemm_ukernel *gemm_cases,
size_t mr);
// Overwrite function pointer to IGEMM microkernels with generated code if available.
XNN_INTERNAL void xnn_overwrite_igemm_cases_with_generated_code(
xnn_operator_t convolution_op,
struct xnn_hmp_igemm_ukernel *igemm_cases,
size_t mr);
XNN_INTERNAL void xnn_generate_vunary_ukernel(
const struct xnn_unary_elementwise_config* config,
xnn_operator_t op);
#endif // XNN_PLATFORM_JIT
static inline void* packed_weights(struct xnn_operator* op) {
if (op->weights_cache == NULL) {
return op->packed_weights.pointer;
} else {
return (void*) ((uintptr_t) op->weights_cache->cache.weights.start + op->packed_weights.offset);
}
}
static inline bool use_weights_cache(struct xnn_operator* op) {
return op->weights_cache != NULL;
}
// Get a pointer to a region to pack weights into. If a weights cache is available, use it, returning a pointer to the
// cache's buffer, otherwise, allocate and return a pointer to a new region. Returns NULL on error.
XNN_INTERNAL void* xnn_get_pointer_to_write_weights(
xnn_operator_t op,
size_t aligned_weights_size,
int padding_byte);
#ifdef __cplusplus
extern "C" {
#endif
XNN_INTERNAL size_t xnn_compute_convolution_output_dimension(
size_t padded_input_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t subsampling_dimension);
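// The standard output-size relation such a helper computes (sketch; exact
// edge-case handling is defined by the implementation):
//   effective_kernel = (kernel - 1) * dilation + 1
//   output = (padded_input - effective_kernel) / subsampling + 1
static inline size_t toy_conv_output_dim(
  size_t padded_input_dimension,
  size_t kernel_dimension,
  size_t dilation_dimension,
  size_t subsampling_dimension)
{
  const size_t effective_kernel = (kernel_dimension - 1) * dilation_dimension + 1;
  return (padded_input_dimension - effective_kernel) / subsampling_dimension + 1;
}
// e.g. toy_conv_output_dim(9, 3, 1, 2) == 4.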
XNN_INTERNAL size_t xnn_compute_deconvolution_output_dimension(
size_t input_dimension,
size_t output_padding_dimension,
size_t adjustment_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t stride_dimension);
XNN_INTERNAL size_t xnn_compute_unpooling_output_dimension(
size_t input_dimension,
size_t input_padding_dimension,
size_t kernel_dimension);
XNN_INTERNAL uint32_t xnn_get_heuristic_mr_gemm(
size_t batch_size,
uint32_t max_mr,
uint32_t nr,
struct xnn_hmp_gemm_ukernel *gemm_cases,
bool code_cache_available);
XNN_INTERNAL uint32_t xnn_get_heuristic_mr_igemm(
size_t batch_size,
uint32_t max_mr,
uint32_t nr,
struct xnn_hmp_igemm_ukernel *igemm_cases,
bool code_cache_available);
#ifdef __cplusplus
}
#endif
// File: XNNPACK-master/src/xnnpack/operator.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <pthreadpool.h>
#include <xnnpack/allocator.h>
#include <xnnpack/cache.h>
#include <xnnpack/compute.h>
#include <xnnpack/config.h>
#include <xnnpack/microkernel-type.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/params.h>
// Maximum number of pthreadpool parallelization invocations per operator.
#define XNN_MAX_COMPUTE_INVOCATIONS 2
struct xnn_ukernel_conv2d {
union {
xnn_conv_hwc2chw_ukernel_fn hwc2chw_fn;
xnn_conv_hwc_ukernel_fn hwc_fn;
};
uint8_t output_height_tile;
uint8_t output_channel_tile;
};
struct xnn_ukernel_dwconv {
union {
xnn_dwconv_unipass_ukernel_fn unipass_fn;
xnn_dwconv_multipass_ukernel_fn multipass_fn;
};
uint8_t primary_tile;
uint8_t middle_tile;
uint8_t last_tile;
// For unipass, tile_size == primary_tile, otherwise it is calculated based on
  // how many passes the middle_tile runs.
uint8_t tile_size;
};
// Direct 2D Depthwise Convolution
struct xnn_ukernel_dwconv2d {
union {
xnn_dwconv2d_chw_ukernel_fn chw_fn;
};
xnn_update_chw_params_fn update_params;
uint8_t output_width_tile;
};
struct xnn_ukernel_gemm {
struct xnn_hmp_gemm_ukernel gemm_cases[XNN_MAX_MR];
xnn_packw_gemm_goi_ukernel_fn packw_gemm_goi;
uint8_t mr;
uint8_t nr;
uint8_t kr;
uint8_t sr;
};
struct xnn_ukernel_igemm {
struct xnn_hmp_igemm_ukernel igemm_cases[XNN_MAX_MR];
struct xnn_hmp_gemm_ukernel gemm_cases[XNN_MAX_MR];
uint8_t mr;
uint8_t nr;
uint8_t kr;
uint8_t sr;
};
struct xnn_ukernel_spmm {
xnn_spmm_ukernel_fn function;
uint8_t mr;
};
struct xnn_ukernel_vmulcaddc {
xnn_vmulcaddc_ukernel_fn function;
uint8_t mr;
};
struct xnn_ukernel_vbinary {
xnn_vbinary_ukernel_fn op_fn;
xnn_vbinary_ukernel_fn opc_fn;
xnn_vbinary_ukernel_fn ropc_fn;
};
struct xnn_ukernel_vunary {
xnn_vunary_ukernel_fn function;
};
struct xnn_ukernel {
enum xnn_microkernel_type type;
  // Used by subconv2d to indicate whether it is a GEMM or IGEMM.
enum xnn_microkernel_type subtype;
union {
struct xnn_ukernel_conv2d conv2d;
struct xnn_ukernel_dwconv dwconv;
struct xnn_ukernel_dwconv2d dwconv2d;
struct {
struct xnn_ukernel_gemm gemm;
struct xnn_ukernel_gemm gemm2;
};
struct xnn_ukernel_igemm igemm;
struct xnn_ukernel_spmm spmm;
struct xnn_ukernel_vmulcaddc vmulcaddc;
struct xnn_ukernel_vbinary vbinary;
struct xnn_ukernel_vunary vunary;
};
};
// Valid state transitions:
// - xnn_run_state_invalid -> xnn_run_state_skip
// - xnn_run_state_invalid -> xnn_run_state_ready
// - xnn_run_state_invalid -> xnn_run_state_needs_setup -> xnn_run_state_ready
enum xnn_run_state {
  // When an operator is first created, it starts off in the invalid state; it needs setup, or reshape + setup.
xnn_run_state_invalid = 0,
// Operator is ready to be run.
xnn_run_state_ready,
// Operator doesn't need to be run.
xnn_run_state_skip,
// Operator has been reshaped, but not setup yet, pointers are not set.
xnn_run_state_needs_setup,
};
struct subconvolution_params {
void* weights;
size_t w_stride;
const void** indirection_buffer;
void* output;
size_t slice_width;
size_t slice_height;
size_t indirection_y_stride;
size_t indirection_x_stride;
// scaled_kernel_size := kernel_size * mr * sizeof(void*).
size_t scaled_kernel_size;
};
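// Worked example for scaled_kernel_size above: a 3x3 kernel (kernel_size = 9)
// with mr = 4 on a 64-bit target gives 9 * 4 * sizeof(void*) = 288 bytes.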
struct xnn_operator {
size_t batch_size;
uint32_t padding_top;
uint32_t padding_right;
uint32_t padding_bottom;
uint32_t padding_left;
uint32_t kernel_height;
uint32_t kernel_width;
uint32_t stride_height;
uint32_t stride_width;
uint32_t dilation_height;
uint32_t dilation_width;
uint32_t groups;
size_t group_channels;
size_t group_input_channels;
size_t group_output_channels;
size_t channels;
size_t max_sequence_size;
uint32_t pad_value;
size_t input_height;
size_t input_width;
size_t input_pixel_stride;
const void* input;
const void* input2;
const void** indirection_buffer;
size_t output_height;
size_t output_width;
size_t output_pixel_stride;
void* output;
union {
// Pointer to allocated packed weights. Use this if weights_cache is NULL.
void* pointer;
// Offset into the weights cache where the packed weights are. Only valid if weights_cache is not NULL.
size_t offset;
} packed_weights;
// Total number of non-zero kernel elements when weights use sparse representation.
size_t num_nonzero_values;
// Total number of non-zero kernel blocks when weights use sparse representation.
size_t num_nonzero_blocks;
// Total number of output channel blocks when weights use sparse representation.
size_t num_output_channel_blocks;
// Input channel corresponding to the first non-zero kernel element.
size_t first_input_channel;
float input_scale;
float output_scale;
int32_t input_zero_point;
size_t valid_batch_size;
size_t last_input_height;
size_t last_input_width;
const void* last_input;
size_t last_output_height;
size_t last_output_width;
void* last_output;
uint32_t last_mr;
uint32_t block_size;
void* zero_buffer;
void* lookup_table;
void* pixelwise_buffer;
struct subconvolution_params* subconvolution_buffer;
uint32_t flags;
union {
union xnn_f16_abs_params f16_abs;
union xnn_f16_f32_cvt_params f16_f32_cvt;
union xnn_f16_hswish_params f16_hswish;
union xnn_f16_elu_params f16_elu;
union xnn_f16_lrelu_params f16_lrelu;
union xnn_f16_neg_params f16_neg;
union xnn_f16_sigmoid_params f16_sigmoid;
union xnn_f16_tanh_params f16_tanh;
union xnn_f32_abs_params f32_abs;
union xnn_f32_default_params f32_default;
union xnn_f32_elu_params f32_elu;
union xnn_f32_lrelu_params f32_lrelu;
union xnn_f32_neg_params f32_neg;
union xnn_f32_rnd_params f32_rnd;
union xnn_f32_sigmoid_params f32_sigmoid;
union xnn_f32_sqrt_params f32_sqrt;
union xnn_f32_tanh_params f32_tanh;
// Parameters for Global Average Pooling in CHW layout
union xnn_f16_gavgpool_params f16_gavgpool;
union xnn_f32_gavgpool_params f32_gavgpool;
union xnn_f32_hswish_params f32_hswish;
    // Pixelwise Average Pooling normally uses f16_minmax_params, but also initialize
// f16_scaleminmax_params in case it needs to switch to Global Average Pooling operation.
struct {
union xnn_f16_minmax_params f16_minmax;
union xnn_f16_scaleminmax_params f16_scaleminmax;
};
    // Mean can use either f16_f32acc_scale or f16_scale_minmax.
struct {
union xnn_f16_f32acc_scale_params f16_f32acc_scale;
union xnn_f16_scaleminmax_params f16_scale_minmax;
};
    // Pixelwise Average Pooling normally uses f32_minmax_params, but also initialize
// f32_scaleminmax_params in case it needs to switch to Global Average Pooling operation.
struct {
union xnn_f32_minmax_params f32_minmax;
union xnn_f32_scaleminmax_params f32_scaleminmax;
};
    // Mean can use either f32_scale or f32_scale_minmax.
struct {
union xnn_f32_scale_params f32_scale;
union xnn_f32_scaleminmax_params f32_scale_minmax;
};
union xnn_f16_chw_params f16_chw;
union xnn_f32_chw_params f32_chw;
union xnn_f32_f16_cvt_params f32_f16_cvt;
union xnn_f32_qs8_cvt_params f32_qs8_cvt;
union xnn_f32_qu8_cvt_params f32_qu8_cvt;
union xnn_qs8_cvt_params qs8_cvt;
union xnn_qs8_f32_cvt_params qs8_f32_cvt;
union xnn_qs16_qs8_cvt_params qs16_qs8_cvt;
union xnn_qu8_cvt_params qu8_cvt;
union xnn_qu8_f32_cvt_params qu8_f32_cvt;
union xnn_qs8_conv_minmax_params qs8_conv_minmax;
    // Average Pooling normally uses qs8_avgpool_params, but also initialize qs8_gavgpool_params in case it needs to switch
// to Global Average Pooling operation.
struct {
union xnn_qs8_avgpool_minmax_params qs8_avgpool;
union xnn_qs8_avgpool_minmax_params qs8_gavgpool;
};
// Quantized Add parameters are sensitive to order of inputs, so we initialize an extra copy with the reversed order.
struct {
union xnn_qs8_add_minmax_params qs8_add;
union xnn_qs8_add_minmax_params qs8_radd;
};
struct {
union xnn_qs8_mul_minmax_params qs8_mul;
union xnn_qs8_mul_minmax_params qs8_rmul;
};
struct {
union xnn_qu8_add_minmax_params qu8_add;
union xnn_qu8_add_minmax_params qu8_radd;
};
struct {
union xnn_qu8_mul_minmax_params qu8_mul;
union xnn_qu8_mul_minmax_params qu8_rmul;
};
union xnn_qu8_conv_minmax_params qu8_conv_minmax;
    // Average Pooling normally uses qu8_avgpool_params, but also initialize qu8_gavgpool_params in case it needs to switch
// to Global Average Pooling operation.
struct {
union xnn_qu8_avgpool_minmax_params qu8_avgpool;
union xnn_qu8_avgpool_minmax_params qu8_gavgpool;
};
union xnn_qs8_hswish_params qs8_hswish;
union xnn_qu8_hswish_params qu8_hswish;
union xnn_qs8_lrelu_params qs8_lrelu;
union xnn_qu8_lrelu_params qu8_lrelu;
union xnn_s8_minmax_params s8_minmax;
union xnn_u8_minmax_params u8_minmax;
} params;
  // Second set of params. Operators like Dynamic Fully Connected only decide on the specific config to use during
// reshape, so it needs to keep two sets of params around. Configs can have different initialization functions.
union {
union xnn_f32_minmax_params f32_minmax;
} params2;
size_t num_post_operation_params;
void* post_operation_params;
enum xnn_operator_type type;
struct xnn_ukernel ukernel;
union {
const struct xnn_argmaxpool_config* argmaxpool_config;
struct {
const struct xnn_avgpool_config* avgpool_config;
const struct xnn_gavgpool_config* gavgpool_config;
const struct xnn_pavgpool_config* pavgpool_config;
const struct xnn_reduce_config* reduce_config;
};
const struct xnn_gavgpool_cw_config* gavgpool_cw_config;
const struct xnn_ibilinear_chw_config* ibilinear_chw_config;
const struct xnn_ibilinear_config* ibilinear_config;
struct {
const struct xnn_rmax_config* rmax_config;
union {
// For QU8.
const struct xnn_lut32norm_config* lut32norm_config;
// For F16 and F32.
struct {
const struct xnn_raddstoreexpminusmax_config* raddstoreexpminusmax_config;
const struct xnn_binary_elementwise_config* vmul_config;
};
};
}; // For softmax operator.
const struct xnn_maxpool_config* maxpool_config;
const struct xnn_prelu_config* prelu_config;
const struct xnn_unpool_config* unpool_config;
const struct xnn_zip_config* zip_config;
struct {
const struct xnn_xx_fill_config* fill_config;
const struct xnn_xx_pad_config* pad_config;
}; // For constant pad operator.
const struct xnn_x8_lut_config* lut_config;
const struct xnn_cmul_config* cmul_config;
const struct xnn_unary_elementwise_config* copy_config;
const struct xnn_transpose_config* transpose_config;
struct {
const struct xnn_reduce_config* rminmax_config;
const struct xnn_unary_elementwise_config* convert_config;
}; // For F32 to QD8 convert operator.
};
struct compute_parameters compute[XNN_MAX_COMPUTE_INVOCATIONS];
union {
struct argmax_pooling_context argmax_pooling;
struct average_pooling_context average_pooling;
struct channel_shuffle_context channel_shuffle;
struct conv2d_context conv2d;
struct dwconv2d_context dwconv2d;
struct dwconv_context dwconv;
struct elementwise_binary_context elementwise_binary;
// PACKW GEMM GOI + GEMM are used together in Dynamic Fully Connected.
struct {
struct gemm_context gemm;
struct packw_gemm_goi_context packw_gemm_goi;
};
struct global_average_pooling_nwc_context global_average_pooling_nwc;
struct global_average_pooling_ncw_context global_average_pooling_ncw;
struct igemm_context igemm;
struct lut_contiguous_context lut_contiguous;
struct lut_strided_context lut_strided;
struct max_pooling_context max_pooling;
struct pad_context pad;
struct pixelwise_average_pooling_context pixelwise_average_pooling;
struct prelu_context prelu;
struct reduce_context reduce;
struct resize_bilinear_context resize_bilinear;
struct resize_bilinear_chw_context resize_bilinear_chw;
struct slice_context slice;
struct spmm_context spmm;
struct subconv_context subconv;
struct subgemm_context subgemm;
struct transpose_context transpose;
struct floating_point_softmax_context floating_point_softmax;
struct u8_softmax_context u8_softmax;
struct f32_qd8_convert_context f32_qd8_convert;
struct univector_contiguous_context univector_contiguous;
struct univector_strided_context univector_strided;
struct unpooling_context unpooling;
struct vmulcaddc_context vmulcaddc;
struct rope_context rope;
} context;
struct xnn_code_cache* code_cache;
struct xnn_weights_cache* weights_cache;
enum xnn_run_state state;
};
XNN_INTERNAL enum xnn_status xnn_run_operator_with_index(
xnn_operator_t op,
size_t opdata_index,
size_t operator_object_index,
pthreadpool_t threadpool);