repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-1x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 1x2 input tile: one input row supplies one element to each of two
// output rows.  All strides are in bytes.
void xnn_x32_transposec_ukernel__1x2_scalar_float(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 1;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(float);
// After a full column strip is consumed, rewind i0 back to the top row
// and advance it to the next pair of input columns (may wrap negatively
// via unsigned arithmetic; the subsequent add cancels out).
const size_t input_reset = tile_wbytes - block_height * input_stride;
// Moves each output pointer from the end of its current row to the start
// of the row tile_width output rows further down.
const size_t output_reset = tile_width * output_stride - block_height * sizeof(float);
const size_t input_offset = tile_height * input_stride;  // advance one input row
const float* i0 = (const float*) input;
float* o0 = (float*) output;
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
do {
// When only one input column remains, alias o1 to o0: the o1 store below
// is then overwritten by the subsequent o0 store (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o1++ = i0[1];  // second input column -> second output row (read may be OOB; see XNN_OOB_READS)
*o0++ = i0[0];  // first input column -> first output row
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
o0 = (float*) ((uintptr_t) o0 + output_reset);
o1 = (float*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 1,718
| 28.637931
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-1x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 1x2 input tile: one input row supplies one element to each of two
// output rows.  All strides are in bytes.
void xnn_x32_transposec_ukernel__1x2_scalar_int(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int));
assert(input_stride >= block_width * sizeof(int));
const size_t tile_height = 1;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int);
// Rewind i0 to the top row and advance to the next pair of input columns
// once a full column strip has been consumed.
const size_t input_reset = tile_wbytes - block_height * input_stride;
// Move each output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int);
const size_t input_offset = tile_height * input_stride;  // advance one input row
const int* i0 = (const int*) input;
int* o0 = (int*) output;
int* o1 = (int*) ((uintptr_t) o0 + output_stride);
do {
// When only one input column remains, alias o1 to o0: the o1 store below
// is then overwritten by the subsequent o0 store (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o1++ = i0[1];  // second input column -> second output row (read may be OOB; see XNN_OOB_READS)
*o0++ = i0[0];  // first input column -> first output row
i0 = (const int*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int*) ((uintptr_t) i0 + input_reset);
o0 = (int*) ((uintptr_t) o0 + output_reset);
o1 = (int*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 1,688
| 28.12069
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-1x4-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 1x4 input tile: one input row supplies one element to each of four
// output rows.  All strides are in bytes.
void xnn_x32_transposec_ukernel__1x4_scalar_float(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 1;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(float);
// Rewind i0 to the top row and advance to the next four input columns
// once a full column strip has been consumed.
const size_t input_reset = tile_wbytes - block_height * input_stride;
// Move each output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - block_height * sizeof(float);
const size_t input_offset = tile_height * input_stride;  // advance one input row
const float* i0 = (const float*) input;
float* o0 = (float*) output;
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
do {
// Redirect output pointers of columns past block_width to o0.  Stores run
// from highest (o3) to lowest (o0), so an aliased pointer's store is
// overwritten by a later valid one (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o3++ = i0[3];  // reads past block_width may be OOB; see XNN_OOB_READS
*o2++ = i0[2];
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
o0 = (float*) ((uintptr_t) o0 + output_reset);
o1 = (float*) ((uintptr_t) o1 + output_reset);
o2 = (float*) ((uintptr_t) o2 + output_reset);
o3 = (float*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,107
| 29.114286
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-1x4-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 1x4 input tile: one input row supplies one element to each of four
// output rows.  All strides are in bytes.
void xnn_x32_transposec_ukernel__1x4_scalar_int(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int));
assert(input_stride >= block_width * sizeof(int));
const size_t tile_height = 1;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int);
// Rewind i0 to the top row and advance to the next four input columns
// once a full column strip has been consumed.
const size_t input_reset = tile_wbytes - block_height * input_stride;
// Move each output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int);
const size_t input_offset = tile_height * input_stride;  // advance one input row
const int* i0 = (const int*) input;
int* o0 = (int*) output;
int* o1 = (int*) ((uintptr_t) o0 + output_stride);
int* o2 = (int*) ((uintptr_t) o1 + output_stride);
int* o3 = (int*) ((uintptr_t) o2 + output_stride);
do {
// Redirect output pointers of columns past block_width to o0.  Stores run
// from highest (o3) to lowest (o0), so an aliased pointer's store is
// overwritten by a later valid one (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o3++ = i0[3];  // reads past block_width may be OOB; see XNN_OOB_READS
*o2++ = i0[2];
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const int*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int*) ((uintptr_t) i0 + input_reset);
o0 = (int*) ((uintptr_t) o0 + output_reset);
o1 = (int*) ((uintptr_t) o1 + output_reset);
o2 = (int*) ((uintptr_t) o2 + output_reset);
o3 = (int*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,065
| 28.514286
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x1-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 2x1 input tile: two input rows each contribute one consecutive element
// of the same output row.  All strides are in bytes.
void xnn_x32_transposec_ukernel__2x1_scalar_float(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 2;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(float);
// Rewind i0 past the even part of the column (round_down_po2 rounds
// block_height down to a multiple of tile_height) and step one column right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Advance o0 from the end of the even part of its row to the next row start;
// the odd-tail element (written without incrementing o0) is accounted for here.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
const size_t input_offset = tile_height * input_stride;  // advance two input rows
const float* i0 = (const float*) input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o0 = (float*) output;
do {
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o0++ = i0[0];  // element from even input row
*o0++ = i1[0];  // element from odd input row
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
// Odd block_height: one trailing input row remains.
if (bh & 1) {
o0[0] = i0[0];
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
i1 = (const float*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o0 = (float*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 1,822
| 29.898305
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x1-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements
// (output[j][i] = input[i][j]).  Each inner-loop iteration consumes a
// 2x1 input tile: two input rows each contribute one consecutive element
// of the same output row.  All strides are in bytes.
void xnn_x32_transposec_ukernel__2x1_scalar_int(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int));
assert(input_stride >= block_width * sizeof(int));
const size_t tile_height = 2;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(int);
// Rewind i0 past the even part of the column (round_down_po2 rounds
// block_height down to a multiple of tile_height) and step one column right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Advance o0 from the end of the even part of its row to the next row start;
// the odd-tail element (written without incrementing o0) is accounted for here.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
const size_t input_offset = tile_height * input_stride;  // advance two input rows
const int* i0 = (const int*) input;
const int* i1 = (const int*) ((uintptr_t) i0 + input_stride);
int* o0 = (int*) output;
do {
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o0++ = i0[0];  // element from even input row
*o0++ = i1[0];  // element from odd input row
i0 = (const int*) ((uintptr_t) i0 + input_offset);
i1 = (const int*) ((uintptr_t) i1 + input_offset);
}
// Odd block_height: one trailing input row remains.
if (bh & 1) {
o0[0] = i0[0];
}
i0 = (const int*) ((uintptr_t) i0 + input_reset);
i1 = (const int*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o0 = (int*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 1,790
| 29.355932
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-multi-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "multi" variant: separate input pointers per row.
// "dec" variant: a single output pointer that jumps to the last valid output
// row of each tile and walks back one row per store (minus_output_stride).
// All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_multi_dec_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;  // advance two input rows
// Advance to the next pair of output rows; the trailing -tile_hbytes keeps
// the pre-store oN_offset bump (below) balanced.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
// o is pre-biased by -tile_hbytes so the loop can add oN_offset before each store.
uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
// Unsigned negation: adding this moves the pointer back by output_stride.
const size_t minus_output_stride = -output_stride;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const uint32x2_t v1_1 = vld1_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
o = (uint32_t*) ((uintptr_t) o + oN_offset);
vst1_u32(o, v0_0.val[1]);
// Only step back to the first output row if it is distinct (block_width > 1);
// otherwise the second store lands on the same row and overwrites the first.
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u32(o, v0_0.val[0]);
}
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32(o, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 3,057
| 32.977778
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-multi-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "multi" variant: separate input pointers per row.
// "mov" variant: computes the previous-row pointer unconditionally into oN
// and conditionally moves it into o (branch-friendly alternative to "dec").
// All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_multi_mov_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;  // advance two input rows
// Advance to the next pair of output rows; the trailing -tile_hbytes keeps
// the pre-store oN_offset bump (below) balanced.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
// o is pre-biased by -tile_hbytes so the loop can add oN_offset before each store.
uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
// Unsigned negation: adding this moves the pointer back by output_stride.
const size_t minus_output_stride = -output_stride;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const uint32x2_t v1_1 = vld1_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
o = (uint32_t*) ((uintptr_t) o + oN_offset);
vst1_u32(o, v0_0.val[1]);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
// Only step back to the first output row if it is distinct (block_width > 1);
// otherwise the second store overwrites the first on the same row.
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u32(o, v0_0.val[0]);
}
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o, v1_low, 0);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32(o, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 3,109
| 32.804348
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-multi-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "multi_multi" variant: separate input pointers
// per input row AND separate output pointers per output row.
// All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_multi_multi_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;  // advance two input rows
// Move each output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
uint32_t* o0 = (uint32_t*) output;
uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
do {
// When only one input column remains, alias o1 to o0: the o1 store below
// is then overwritten by the subsequent o0 store (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const uint32x2_t v1_1 = vld1_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
vst1_u32(o1, v0_0.val[1]); o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
vst1_u32(o0, v0_0.val[0]); o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
}
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o1, v1_low, 0);
vst1_lane_u32(o0, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,704
| 31.987805
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-multi-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "multi" variant: separate input pointers per row.
// "switch" variant: a switch on rem (count of valid extra output rows)
// selects which stores to issue, with intentional fallthrough so lower
// cases also execute.  All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_multi_switch_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;  // advance two input rows
// Move the output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
uint32_t* o = (uint32_t*) output;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const uint32x2_t v1_1 = vld1_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 1:
vst1_u32(oN, v0_0.val[1]);
/* fallthrough - lower cases always store too */
case 0:
vst1_u32(o, v0_0.val[0]); o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 1:
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(oN, v1_low, 0);
/* fallthrough */
case 0:
vst1_lane_u32(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);  // recomputed from the reset i0
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,965
| 30.221053
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-reuse-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "reuse" variant: a single input pointer i0 is
// stepped one row at a time (instead of one pointer per input row).
// "dec" variant: a single output pointer that jumps to the last valid output
// row of each tile and walks back one row per store.  Strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_reuse_dec_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Advance to the next pair of output rows; the trailing -tile_hbytes keeps
// the pre-store oN_offset bump (below) balanced.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
const uint32_t* i0 = input;
// o is pre-biased by -tile_hbytes so the loop can add oN_offset before each store.
uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
// Unsigned negation: adding this moves the pointer back by output_stride.
const size_t minus_output_stride = -output_stride;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x2_t v1_1 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
o = (uint32_t*) ((uintptr_t) o + oN_offset);
vst1_u32(o, v0_0.val[1]);
// Only step back to the first output row if it is distinct (block_width > 1);
// otherwise the second store overwrites the first on the same row.
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u32(o, v0_0.val[0]);
}
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32(o, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,865
| 31.942529
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-reuse-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "reuse" variant: a single input pointer i0 is
// stepped one row at a time.  "mov" variant: the previous-row pointer is
// computed unconditionally into oN and conditionally moved into o.
// All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_reuse_mov_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Advance to the next pair of output rows; the trailing -tile_hbytes keeps
// the pre-store oN_offset bump (below) balanced.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
const uint32_t* i0 = input;
// o is pre-biased by -tile_hbytes so the loop can add oN_offset before each store.
uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
// Unsigned negation: adding this moves the pointer back by output_stride.
const size_t minus_output_stride = -output_stride;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x2_t v1_1 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
o = (uint32_t*) ((uintptr_t) o + oN_offset);
vst1_u32(o, v0_0.val[1]);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
// Only step back to the first output row if it is distinct (block_width > 1);
// otherwise the second store overwrites the first on the same row.
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u32(o, v0_0.val[0]);
}
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o, v1_low, 0);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32(o, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,917
| 31.786517
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-reuse-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "reuse" variant: a single input pointer i0 is
// stepped one row at a time.  "multi" output variant: one output pointer
// per output row.  All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_reuse_multi_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Move each output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
uint32_t* o0 = (uint32_t*) output;
uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
do {
// When only one input column remains, alias o1 to o0: the o1 store below
// is then overwritten by the subsequent o0 store (store order matters).
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x2_t v1_1 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
vst1_u32(o1, v0_0.val[1]); o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
vst1_u32(o0, v0_0.val[0]); o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
}
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(o1, v1_low, 0);
vst1_lane_u32(o0, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,512
| 30.810127
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-reuse-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements using
// NEON vzip on 2x2 tiles.  "reuse" variant: a single input pointer i0 is
// stepped one row at a time.  "switch" variant: a switch on rem (count of
// valid extra output rows) selects which stores to issue, with intentional
// fallthrough so lower cases also execute.  All strides are in bytes.
void xnn_x32_transposec_ukernel__2x2_reuse_switch_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
// Rewind past the even part of the column strip and step tile_width columns right.
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
// Move the output pointer to the start of the row tile_width rows down.
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
uint32_t* o = (uint32_t*) output;
do {
// rem = number of valid output rows beyond the first in this tile (0 or 1).
const size_t rem = min(block_width - 1, 1);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
const uint32x2_t v1_0 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x2_t v1_1 = vld1_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
// vzip interleaves the two row vectors: val[0]/val[1] hold the two transposed columns.
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 1:
vst1_u32(oN, v0_0.val[1]);
/* fallthrough - lower cases always store too */
case 0:
vst1_u32(o, v0_0.val[0]); o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
// Odd block_height: process the last input row, zero-padding the missing row.
if (bh != 0) {
const uint32x2_t v1_0 = vld1_u32(i0);
const uint32x2_t v1_1 = vmov_n_u32(0);
const uint32x2x2_t v0_0 = vzip_u32(v1_0, v1_1);
uint32x2_t v0_low = v0_0.val[0];
uint32x2_t v1_low = v0_0.val[1];
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 1:
// Store only lane 0 (the valid element); lane 1 is the zero pad.
vst1_lane_u32(oN, v1_low, 0);
/* fallthrough */
case 0:
vst1_lane_u32(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);  // doz = difference-or-zero (saturating subtract)
} while (block_width != 0);
}
| 2,773
| 29.152174
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit floats in 2x2
// tiles; `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__2x2_scalar_float(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(float);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next pair of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
  const size_t input_offset = tile_height * input_stride;
  const float* in0 = (const float*) input;
  const float* in1 = (const float*) ((uintptr_t) in0 + input_stride);
  float* out0 = (float*) output;
  float* out1 = (float*) ((uintptr_t) out0 + output_stride);
  do {
    // With a single remaining column, alias the second output row onto the
    // first; its stores are issued first and then overwritten by out0's.
    if XNN_UNPREDICTABLE(block_width < 2) {
      out1 = out0;
    }
    size_t rows = block_height;
    while (rows >= 2) {
      *out1++ = in0[1];
      *out1++ = in1[1];
      *out0++ = in0[0];
      *out0++ = in1[0];
      in0 = (const float*) ((uintptr_t) in0 + input_offset);
      in1 = (const float*) ((uintptr_t) in1 + input_offset);
      rows -= 2;
    }
    if (rows & 1) {
      // Odd tail row: copy its two elements without advancing the outputs.
      out1[0] = in0[1];
      out0[0] = in0[0];
    }
    in0 = (const float*) ((uintptr_t) in0 + input_reset);
    in1 = (const float*) ((uintptr_t) in0 + input_stride);
    out0 = (float*) ((uintptr_t) out0 + output_reset);
    out1 = (float*) ((uintptr_t) out1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,058
| 29.731343
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit ints in 2x2
// tiles; `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__2x2_scalar_int(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int));
  assert(input_stride >= block_width * sizeof(int));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(int);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next pair of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
  const size_t input_offset = tile_height * input_stride;
  const int* in0 = (const int*) input;
  const int* in1 = (const int*) ((uintptr_t) in0 + input_stride);
  int* out0 = (int*) output;
  int* out1 = (int*) ((uintptr_t) out0 + output_stride);
  do {
    // With a single remaining column, alias the second output row onto the
    // first; its stores are issued first and then overwritten by out0's.
    if XNN_UNPREDICTABLE(block_width < 2) {
      out1 = out0;
    }
    size_t rows = block_height;
    while (rows >= 2) {
      *out1++ = in0[1];
      *out1++ = in1[1];
      *out0++ = in0[0];
      *out0++ = in1[0];
      in0 = (const int*) ((uintptr_t) in0 + input_offset);
      in1 = (const int*) ((uintptr_t) in1 + input_offset);
      rows -= 2;
    }
    if (rows & 1) {
      // Odd tail row: copy its two elements without advancing the outputs.
      out1[0] = in0[1];
      out0[0] = in0[0];
    }
    in0 = (const int*) ((uintptr_t) in0 + input_reset);
    in1 = (const int*) ((uintptr_t) in0 + input_stride);
    out0 = (int*) ((uintptr_t) out0 + output_reset);
    out1 = (int*) ((uintptr_t) out1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,020
| 29.164179
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x4-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit floats in 2x4
// tiles (2 input rows, 4 input columns -> 4 output rows of 2 elements);
// `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__2x4_scalar_float(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));
  const size_t tile_height = 2;
  const size_t tile_width = 4;
  const size_t tile_wbytes = tile_width * sizeof(float);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next group of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
  const size_t input_offset = tile_height * input_stride;
  const float* i0 = (const float*) input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o0 = (float*) output;
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  o2 = (float*) ((uintptr_t) o1 + output_stride);
  float* o3 = (float*) ((uintptr_t) o2 + output_stride);
  do {
    // When fewer than 4 columns remain, alias the out-of-range output rows
    // onto o0; their stores are issued first and o0's stores win.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Store order (o3 down to o0) matters: aliased rows must be
      // overwritten by the o0 stores last.
      *o3++ = i0[3];
      *o3++ = i1[3];
      *o2++ = i0[2];
      *o2++ = i1[2];
      *o1++ = i0[1];
      *o1++ = i1[1];
      *o0++ = i0[0];
      *o0++ = i1[0];
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    if (bh & 1) {
      // Odd tail row: copy one element into each output row.
      o3[0] = i0[3];
      o2[0] = i0[2];
      o1[0] = i0[1];
      o0[0] = i0[0];
    }
    i0 = (const float*) ((uintptr_t) i0 + input_reset);
    i1 = (const float*) ((uintptr_t) i0 + input_stride);
    o0 = (float*) ((uintptr_t) o0 + output_reset);
    o1 = (float*) ((uintptr_t) o1 + output_reset);
    o2 = (float*) ((uintptr_t) o2 + output_reset);
    o3 = (float*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,531
| 29.506024
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-2x4-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit ints in 2x4
// tiles (2 input rows, 4 input columns -> 4 output rows of 2 elements);
// `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__2x4_scalar_int(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int));
  assert(input_stride >= block_width * sizeof(int));
  const size_t tile_height = 2;
  const size_t tile_width = 4;
  const size_t tile_wbytes = tile_width * sizeof(int);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next group of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
  const size_t input_offset = tile_height * input_stride;
  const int* i0 = (const int*) input;
  const int* i1 = (const int*) ((uintptr_t) i0 + input_stride);
  int* o0 = (int*) output;
  int* o1 = (int*) ((uintptr_t) o0 + output_stride);
  int* o2 = (int*) ((uintptr_t) o1 + output_stride);
  int* o3 = (int*) ((uintptr_t) o2 + output_stride);
  do {
    // When fewer than 4 columns remain, alias the out-of-range output rows
    // onto o0; their stores are issued first and o0's stores win.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Store order (o3 down to o0) matters: aliased rows must be
      // overwritten by the o0 stores last.
      *o3++ = i0[3];
      *o3++ = i1[3];
      *o2++ = i0[2];
      *o2++ = i1[2];
      *o1++ = i0[1];
      *o1++ = i1[1];
      *o0++ = i0[0];
      *o0++ = i1[0];
      i0 = (const int*) ((uintptr_t) i0 + input_offset);
      i1 = (const int*) ((uintptr_t) i1 + input_offset);
    }
    if (bh & 1) {
      // Odd tail row: copy one element into each output row.
      o3[0] = i0[3];
      o2[0] = i0[2];
      o1[0] = i0[1];
      o0[0] = i0[0];
    }
    i0 = (const int*) ((uintptr_t) i0 + input_reset);
    i1 = (const int*) ((uintptr_t) i0 + input_stride);
    o0 = (int*) ((uintptr_t) o0 + output_reset);
    o1 = (int*) ((uintptr_t) o1 + output_reset);
    o2 = (int*) ((uintptr_t) o2 + output_reset);
    o3 = (int*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,481
| 28.903614
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x1-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit floats in 4x1
// tiles: four input rows feed one output row per pass over a column.
// `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__4x1_scalar_float(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));
  const size_t tile_height = 4;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(float);
  // Byte deltas that rewind the pointers from the bottom of one input
  // column to the top of the next column / output row.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
  const size_t input_offset = tile_height * input_stride;
  const float* in0 = (const float*) input;
  const float* in1 = (const float*) ((uintptr_t) in0 + input_stride);
  const float* in2 = (const float*) ((uintptr_t) in1 + input_stride);
  const float* in3 = (const float*) ((uintptr_t) in2 + input_stride);
  float* out = (float*) output;
  do {
    size_t rows = block_height;
    while (rows >= 4) {
      *out++ = in0[0];
      *out++ = in1[0];
      *out++ = in2[0];
      *out++ = in3[0];
      in0 = (const float*) ((uintptr_t) in0 + input_offset);
      in1 = (const float*) ((uintptr_t) in1 + input_offset);
      in2 = (const float*) ((uintptr_t) in2 + input_offset);
      in3 = (const float*) ((uintptr_t) in3 + input_offset);
      rows -= 4;
    }
    // Tail of 1..3 rows: handle a pair first, then a single leftover row.
    const float* tail = in0;
    if (rows & 2) {
      out[0] = in0[0];
      out[1] = in1[0];
      out += 2;
      tail = in2;
    }
    if (rows & 1) {
      out[0] = tail[0];
    }
    in0 = (const float*) ((uintptr_t) in0 + input_reset);
    in1 = (const float*) ((uintptr_t) in0 + input_stride);
    in2 = (const float*) ((uintptr_t) in1 + input_stride);
    in3 = (const float*) ((uintptr_t) in2 + input_stride);
    out = (float*) ((uintptr_t) out + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,351
| 30.783784
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x1-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit ints in 4x1
// tiles: four input rows feed one output row per pass over a column.
// `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__4x1_scalar_int(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int));
  assert(input_stride >= block_width * sizeof(int));
  const size_t tile_height = 4;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(int);
  // Byte deltas that rewind the pointers from the bottom of one input
  // column to the top of the next column / output row.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
  const size_t input_offset = tile_height * input_stride;
  const int* i0 = (const int*) input;
  const int* i1 = (const int*) ((uintptr_t) i0 + input_stride);
  const int* i2 = (const int*) ((uintptr_t) i1 + input_stride);
  const int* i3 = (const int*) ((uintptr_t) i2 + input_stride);
  int* o0 = (int*) output;
  do {
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      *o0++ = i0[0];
      *o0++ = i1[0];
      *o0++ = i2[0];
      *o0++ = i3[0];
      i0 = (const int*) ((uintptr_t) i0 + input_offset);
      i1 = (const int*) ((uintptr_t) i1 + input_offset);
      i2 = (const int*) ((uintptr_t) i2 + input_offset);
      i3 = (const int*) ((uintptr_t) i3 + input_offset);
    }
    // Tail of 1..3 rows: handle a pair first, then a single leftover row.
    const int* i = i0;
    if (bh & 2) {
      o0[0] = i0[0];
      o0[1] = i1[0];
      o0 += 2;
      i = i2;
    }
    if (bh & 1) {
      o0[0] = i[0];
    }
    i0 = (const int*) ((uintptr_t) i0 + input_reset);
    i1 = (const int*) ((uintptr_t) i0 + input_stride);
    i2 = (const int*) ((uintptr_t) i1 + input_stride);
    i3 = (const int*) ((uintptr_t) i2 + input_stride);
    o0 = (int*) ((uintptr_t) o0 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,301
| 30.108108
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x2_scalar_float(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 4;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(float);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
const size_t input_offset = tile_height * input_stride;
const float* i0 = (const float*) input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o0 = (float*) output;
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i = i0;
if (bh & 2) {
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
i1 = (const float*) ((uintptr_t) i0 + input_stride);
i2 = (const float*) ((uintptr_t) i1 + input_stride);
i3 = (const float*) ((uintptr_t) i2 + input_stride);
o0 = (float*) ((uintptr_t) o0 + output_reset);
o1 = (float*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 2,685
| 29.873563
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit ints in 4x2
// tiles (4 input rows x 2 input columns -> 2 output rows of 4 elements);
// `input_stride` and `output_stride` are given in bytes.
void xnn_x32_transposec_ukernel__4x2_scalar_int(
    const uint32_t *input,
    uint32_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int));
  assert(input_stride >= block_width * sizeof(int));
  const size_t tile_height = 4;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(int);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next pair of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
  const size_t input_offset = tile_height * input_stride;
  const int* i0 = (const int*) input;
  const int* i1 = (const int*) ((uintptr_t) i0 + input_stride);
  const int* i2 = (const int*) ((uintptr_t) i1 + input_stride);
  const int* i3 = (const int*) ((uintptr_t) i2 + input_stride);
  int* o0 = (int*) output;
  int* o1 = (int*) ((uintptr_t) o0 + output_stride);
  do {
    // With a single column left, alias the second output row onto the first;
    // its stores are issued first and then overwritten by o0's stores.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      *o1++ = i0[1];
      *o1++ = i1[1];
      *o1++ = i2[1];
      *o1++ = i3[1];
      *o0++ = i0[0];
      *o0++ = i1[0];
      *o0++ = i2[0];
      *o0++ = i3[0];
      i0 = (const int*) ((uintptr_t) i0 + input_offset);
      i1 = (const int*) ((uintptr_t) i1 + input_offset);
      i2 = (const int*) ((uintptr_t) i2 + input_offset);
      i3 = (const int*) ((uintptr_t) i3 + input_offset);
    }
    // Tail of 1..3 rows: handle a pair first, then a single leftover row.
    const int* i = i0;
    if (bh & 2) {
      o1[0] = i0[1];
      o1[1] = i1[1];
      o1 += 2;
      o0[0] = i0[0];
      o0[1] = i1[0];
      o0 += 2;
      i = i2;
    }
    if (bh & 1) {
      o1[0] = i[1];
      o0[0] = i[0];
    }
    i0 = (const int*) ((uintptr_t) i0 + input_reset);
    i1 = (const int*) ((uintptr_t) i0 + input_stride);
    i2 = (const int*) ((uintptr_t) i1 + input_stride);
    i3 = (const int*) ((uintptr_t) i2 + input_stride);
    o0 = (int*) ((uintptr_t) o0 + output_reset);
    o1 = (int*) ((uintptr_t) o1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,629
| 29.229885
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 32-bit elements in
// 4x4 tiles using two NEON vzip levels; strides are given in bytes.
// "dec" variant: the output pointer jumps to the last of up to 4 output
// rows and walks back by -output_stride, guarded by block_width checks.
void xnn_x32_transposec_ukernel__4x4_multi_dec_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next group of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // o is pre-biased by -tile_hbytes (see init below), hence the extra
  // -tile_hbytes folded into output_reset.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem: number of extra output rows beyond the first (0..3);
    // oN_offset jumps o to the last valid output row plus the tile advance.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      // Load 4 input rows and transpose via two zip levels.
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const uint32x4_t v2_1 = vld1q_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const uint32x4_t v2_2 = vld1q_u32(i2); i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const uint32x4_t v2_3 = vld1q_u32(i3); i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      // Store from the highest output row down; when a row is out of range
      // the pointer is not decremented, so the next store overwrites it.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      vst1q_u32(o, v0_1.val[1]);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_1.val[0]);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_0.val[0]);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Tail of 1..3 rows: out-of-range row pointers are aliased onto i0
      // and the fourth zip input is zero padding.
      const uint32x4_t v2_0 = vld1q_u32(i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      if (bh & 2) {
        // Store a pair of elements per output row, highest row first.
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_u32(o, v3_low);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v2_low);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v1_low);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v0_low); o += 2;
        // Shift the remaining (high) halves down for the odd-row store.
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      if (bh & 1) {
        // Store one element per output row, highest row first.
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u32(o, v3_low, 0);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v2_low, 0);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v1_low, 0);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v0_low, 0);
      }
    }
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,636
| 36.832215
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-mov-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 32-bit elements in
// 4x4 tiles using SSE2 unpack instructions; strides are given in bytes.
// "mov" variant: a candidate decremented pointer oN is computed
// unconditionally and conditionally moved into o.
void xnn_x32_transposec_ukernel__4x4_multi_mov_sse2(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte deltas that rewind the pointers from the bottom of one column of
  // tiles to the top of the next group of input columns / output rows.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // o is pre-biased by -tile_hbytes (see init below), hence the extra
  // -tile_hbytes folded into output_reset.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem: number of extra output rows beyond the first (0..3);
    // oN_offset jumps o to the last valid output row plus the tile advance.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      // Load 4 input rows and transpose via 32-bit then 64-bit unpacks.
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i3);
      i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Store from the highest output row down; when a row is out of range
      // o is not moved to oN, so the next store overwrites the same row.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      _mm_storeu_si128((__m128i*) o, v0_3);
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_2);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_1);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_0);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Tail of 1..3 rows: out-of-range row pointers are aliased onto i0;
      // the fourth lane source is left undefined (its lanes are never stored).
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_undefined_si128();
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      if (bh & 2) {
        // Store a pair of elements per output row, highest row first.
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        _mm_storel_epi64((__m128i*) o, v0_3);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_2);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_1);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_0);
        o += 2;
        // Shift the remaining high halves down for the odd-row store.
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
      }
      if (bh & 1) {
        // Store one element per output row, highest row first; the
        // destination may be unaligned, hence unaligned_store_u32.
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_3));
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_2));
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_1));
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
      }
    }
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 6,472
| 36.853801
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-mov-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using WASM SIMD shuffles.
// "multi_mov" variant: four input row pointers (i0..i3) are maintained, but a
// single output pointer `o` is advanced to the LAST valid output row of each
// tile and then stepped backwards one row at a time via minus_output_stride,
// with guarded pointer moves instead of conditional stores.
void xnn_x32_transposec_ukernel__4x4_multi_mov_wasmsimd(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte rewind applied to i0 after finishing one 4-column strip: step right
  // by one tile width and undo the full-tile descent down the block.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Rewind for `o` between strips; compensates for the per-tile tile_hbytes
  // advance and the 2-element tail steps made below.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  // Pre-decrement by one tile so the first oN_offset advance lands correctly.
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row for this column strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const v128_t v2_0 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const v128_t v2_1 = wasm_v128_load(i1);
      i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const v128_t v2_2 = wasm_v128_load(i2);
      i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const v128_t v2_3 = wasm_v128_load(i3);
      i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Two-stage shuffle network: after both stages, v0_k holds transposed
      // row k of the 4x4 tile (i.e. input column k).
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      // Store rows from last (v0_3) back to first (v0_0). When a row index
      // exceeds block_width, `o` is NOT advanced, so the next store simply
      // overwrites the same (eventually-correct) location.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      wasm_v128_store(o, v0_3);
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_2);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_1);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      wasm_v128_store(o, v0_0);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Tail: 1-3 input rows remain. Missing rows alias i0 (harmless reload);
      // the absent 4th row is replaced by a zero vector.
      const v128_t v2_0 = wasm_v128_load(i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v2_1 = wasm_v128_load(i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const v128_t v2_2 = wasm_v128_load(i2);
      const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      if (bh & 2) {
        // Write 2 elements per output row, then shift the high 64-bit halves
        // down so the (bh & 1) case can store the next element from lane 0.
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        wasm_v128_store64_lane(o, v0_3, 0);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_2, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_1, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_0, 0);
        o += 2;
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
      }
      if (bh & 1) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        wasm_v128_store32_lane(o, v0_3, 0);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_2, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_1, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_0, 0);
      }
    }
    // Advance to the next 4-column strip of the input / 4-row band of output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 6,365
| 37.581818
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_multi_mov_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const uint32x4_t v2_1 = vld1q_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
const uint32x4_t v2_2 = vld1q_u32(i2); i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
const uint32x4_t v2_3 = vld1q_u32(i3); i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
o = (uint32_t*) ((uintptr_t) o + oN_offset);
vst1q_u32(o, v0_1.val[1]);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1q_u32(o, v0_1.val[0]);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1q_u32(o, v0_0.val[1]);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1q_u32(o, v0_0.val[0]);
}
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint32x4_t v2_0 = vld1q_u32(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint32x4_t v2_1 = vld1q_u32(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint32x4_t v2_2 = vld1q_u32(i2);
const uint32x4_t v2_3 = vmovq_n_u32(0);
const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
if (bh & 2) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
vst1_u32(o, v3_low);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u32(o, v2_low);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u32(o, v1_low);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u32(o, v0_low); o += 2;
v0_low = vget_high_u32(v0_0.val[0]);
v1_low = vget_high_u32(v0_0.val[1]);
v2_low = vget_high_u32(v0_1.val[0]);
v3_low = vget_high_u32(v0_1.val[1]);
}
if (bh & 1) {
o = (uint32_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32(o, v3_low, 0);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32(o, v2_low, 0);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32(o, v1_low, 0);
oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32(o, v0_low, 0);
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,813
| 35.797468
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-multi-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using SSE2 unpack instructions.
// "multi_multi" variant: four input row pointers (i0..i3) AND four output row
// pointers (o0..o3) are maintained; output pointers for rows beyond
// block_width are aliased to o0 so their stores become harmless overwrites.
void xnn_x32_transposec_ukernel__4x4_multi_multi_sse2(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte rewind applied to i0 after finishing one 4-column strip of the input.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Rewind applied to each output pointer between strips.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Alias out-of-range output rows to o0; writes through them are
    // overwritten by the later (valid) o0 store.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i3);
      i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Classic 4x4 transpose: 32-bit unpacks then 64-bit unpacks; v0_k holds
      // transposed row k of the tile.
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Store in o3..o0 order so aliased pointers end with the o0 value.
      _mm_storeu_si128((__m128i*) o3, v0_3);
      o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o2, v0_2);
      o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o1, v0_1);
      o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o0, v0_0);
      o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      // Tail: 1-3 input rows remain. Missing rows alias i0; the absent 4th
      // row is an undefined vector (its lanes are never stored).
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_undefined_si128();
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      if (bh & 2) {
        // Store 2 elements per row, then shift high halves down so the
        // (bh & 1) case can store the next element from lane 0.
        _mm_storel_epi64((__m128i*) o3, v0_3);
        o3 += 2;
        _mm_storel_epi64((__m128i*) o2, v0_2);
        o2 += 2;
        _mm_storel_epi64((__m128i*) o1, v0_1);
        o1 += 2;
        _mm_storel_epi64((__m128i*) o0, v0_0);
        o0 += 2;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
      }
      if (bh & 1) {
        unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v0_3));
        unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v0_2));
        unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v0_1));
        unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v0_0));
      }
    }
    // Advance to the next 4-column strip of the input / 4-row band of output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,584
| 36.483221
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-multi-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using WASM SIMD shuffles.
// "multi_multi" variant: four input row pointers (i0..i3) AND four output row
// pointers (o0..o3) are maintained; output pointers for rows beyond
// block_width are aliased to o0 so their stores become harmless overwrites.
void xnn_x32_transposec_ukernel__4x4_multi_multi_wasmsimd(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte rewind applied to i0 after finishing one 4-column strip of the input.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Rewind applied to each output pointer between strips.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Alias out-of-range output rows to o0; writes through them are
    // overwritten by the later (valid) o0 store.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const v128_t v2_0 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const v128_t v2_1 = wasm_v128_load(i1);
      i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const v128_t v2_2 = wasm_v128_load(i2);
      i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const v128_t v2_3 = wasm_v128_load(i3);
      i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Two-stage shuffle network: v0_k holds transposed row k of the tile.
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      // Store in o3..o0 order so aliased pointers end with the o0 value.
      wasm_v128_store(o3, v0_3);
      o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      wasm_v128_store(o2, v0_2);
      o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      wasm_v128_store(o1, v0_1);
      o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      wasm_v128_store(o0, v0_0);
      o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      // Tail: 1-3 input rows remain. Missing rows alias i0; the absent 4th
      // row is replaced by a zero vector.
      const v128_t v2_0 = wasm_v128_load(i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v2_1 = wasm_v128_load(i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const v128_t v2_2 = wasm_v128_load(i2);
      const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      if (bh & 2) {
        // Store 2 elements per row, then shift high 64-bit halves down so the
        // (bh & 1) case can store the next element from lane 0.
        wasm_v128_store64_lane(o3, v0_3, 0);
        o3 += 2;
        wasm_v128_store64_lane(o2, v0_2, 0);
        o2 += 2;
        wasm_v128_store64_lane(o1, v0_1, 0);
        o1 += 2;
        wasm_v128_store64_lane(o0, v0_0, 0);
        o0 += 2;
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
      }
      if (bh & 1) {
        wasm_v128_store32_lane(o3, v0_3, 0);
        wasm_v128_store32_lane(o2, v0_2, 0);
        wasm_v128_store32_lane(o1, v0_1, 0);
        wasm_v128_store32_lane(o0, v0_0, 0);
      }
    }
    // Advance to the next 4-column strip of the input / 4-row band of output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,477
| 37.307692
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using ARM NEON vzipq interleaves.
// "multi_multi" variant: four input row pointers (i0..i3) AND four output row
// pointers (o0..o3) are maintained; output pointers for rows beyond
// block_width are aliased to o0 so their stores become harmless overwrites.
void xnn_x32_transposec_ukernel__4x4_multi_multi_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte rewind applied to i0 after finishing one 4-column strip of the input.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Rewind applied to each output pointer between strips.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Alias out-of-range output rows to o0; writes through them are
    // overwritten by the later (valid) o0 store.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const uint32x4_t v2_1 = vld1q_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const uint32x4_t v2_2 = vld1q_u32(i2); i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const uint32x4_t v2_3 = vld1q_u32(i3); i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Two-stage zip network: result vectors hold the transposed tile rows
      // (v0_0.val[0] = row 0, ..., v0_1.val[1] = row 3).
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      // Store in o3..o0 order so aliased pointers end with the o0 value.
      vst1q_u32(o3, v0_1.val[1]); o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      vst1q_u32(o2, v0_1.val[0]); o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      vst1q_u32(o1, v0_0.val[1]); o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      vst1q_u32(o0, v0_0.val[0]); o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      // Tail: 1-3 input rows remain. Missing rows alias i0; the absent 4th
      // row is replaced by a zero vector.
      const uint32x4_t v2_0 = vld1q_u32(i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      if (bh & 2) {
        // Store 2 elements per row, then switch to the high halves so the
        // (bh & 1) case can store the next element from lane 0.
        vst1_u32(o3, v3_low); o3 += 2;
        vst1_u32(o2, v2_low); o2 += 2;
        vst1_u32(o1, v1_low); o1 += 2;
        vst1_u32(o0, v0_low); o0 += 2;
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      if (bh & 1) {
        vst1_lane_u32(o3, v3_low, 0);
        vst1_lane_u32(o2, v2_low, 0);
        vst1_lane_u32(o1, v1_low, 0);
        vst1_lane_u32(o0, v0_low, 0);
      }
    }
    // Advance to the next 4-column strip of the input / 4-row band of output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 4,877
| 36.813953
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-switch-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using SSE2 unpack instructions.
// "multi_switch" variant: four input row pointers (i0..i3) are maintained,
// and a switch on `rem` (the number of extra valid output rows) with
// intentional fallthrough stores only the rows that exist, walking backwards
// from the last valid row via minus_output_stride.
void xnn_x32_transposec_ukernel__4x4_multi_switch_sse2(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Byte rewind applied to i0 after finishing one 4-column strip of the input.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Rewind applied to `o` between strips.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o = (uint32_t*) output;
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row for this column strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i3);
      i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Classic 4x4 transpose: 32-bit unpacks then 64-bit unpacks; v0_k holds
      // transposed row k of the tile.
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
      // Intentional fallthrough: starting at the last valid row, each case
      // stores one row and steps oN back by one output row.
      switch (rem) {
        case 3:
          _mm_storeu_si128((__m128i*) oN, v0_3);
          oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
        case 2:
          _mm_storeu_si128((__m128i*) oN, v0_2);
          oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
        case 1:
          _mm_storeu_si128((__m128i*) oN, v0_1);
        case 0:
          _mm_storeu_si128((__m128i*) o, v0_0);
          o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    if (bh != 0) {
      // Tail: 1-3 input rows remain. Missing rows alias i0; the absent 4th
      // row is an undefined vector (its lanes are never stored).
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_undefined_si128();
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      if (bh & 2) {
        // Store 2 elements per valid row (intentional fallthrough), then
        // shift high halves down for the (bh & 1) case.
        uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 3:
            _mm_storel_epi64((__m128i*) oN, v0_3);
            oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          case 2:
            _mm_storel_epi64((__m128i*) oN, v0_2);
            oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          case 1:
            _mm_storel_epi64((__m128i*) oN, v0_1);
          case 0:
            _mm_storel_epi64((__m128i*) o, v0_0);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 2;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
      }
      if (bh & 1) {
        // Store the final single element of each valid row (intentional
        // fallthrough).
        uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 3:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_3));
            oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          case 2:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_2));
            oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          case 1:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_1));
          case 0:
            unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    // Advance to the next 4-column strip of the input / 4-row band of output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 35.616766
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-switch-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_multi_switch_wasmsimd(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
uint32_t* o = (uint32_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const v128_t v2_0 = wasm_v128_load(i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
const v128_t v2_1 = wasm_v128_load(i1);
i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
const v128_t v2_2 = wasm_v128_load(i2);
i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
const v128_t v2_3 = wasm_v128_load(i3);
i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store(oN, v0_3);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store(oN, v0_2);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store(oN, v0_1);
case 0:
wasm_v128_store(o, v0_0);
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const v128_t v2_0 = wasm_v128_load(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v2_1 = wasm_v128_load(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const v128_t v2_2 = wasm_v128_load(i2);
const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
if (bh & 2) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store64_lane(oN, v0_3, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store64_lane(oN, v0_2, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store64_lane(oN, v0_1, 0);
case 0:
wasm_v128_store64_lane(o, v0_0, 0);
o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
}
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store32_lane(oN, v0_3, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store32_lane(oN, v0_2, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store32_lane(oN, v0_1, 0);
case 0:
wasm_v128_store32_lane(o, v0_0, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 6,011
| 36.341615
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-multi-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using NEON zip (vzipq_u32) shuffles.  "multi": four separate input
// row pointers; "switch": remainder output rows are stored through
// fall-through switches keyed on the number of valid rows.  Strides are in
// bytes.  Loads may overrun the block edge (XNN_OOB_READS); stores never do.
void xnn_x32_transposec_ukernel__4x4_multi_switch_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t))\
;
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointers to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Moves the output pointer past this strip's 4 rows, undoing the column
  // advances made while storing.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  // Four input row pointers, each advanced 4 rows per tile iteration.
  const uint32_t* i0 = input;
  const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
  const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
  const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
  uint32_t* o = (uint32_t*) output;
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row in this strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);
      const uint32x4_t v2_1 = vld1q_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset);
      const uint32x4_t v2_2 = vld1q_u32(i2); i2 = (uint32_t*) ((uintptr_t) i2 + input_offset);
      const uint32x4_t v2_3 = vld1q_u32(i3); i3 = (uint32_t*) ((uintptr_t) i3 + input_offset);
      // Two rounds of zips complete the 4x4 in-register transpose.
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      // Store rows last-to-first; every case intentionally falls through.
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 3:
          vst1q_u32(oN, v0_1.val[1]); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          /* fallthrough */
        case 2:
          vst1q_u32(oN, v0_1.val[0]); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
          /* fallthrough */
        case 1:
          vst1q_u32(oN, v0_0.val[1]);
          /* fallthrough */
        case 0:
          vst1q_u32(o, v0_0.val[0]); o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    // 1..3 leftover rows: aliasing i1/i2 to i0 keeps loads inside the block;
    // the zero vector v2_3 stands in for the absent fourth row.
    if (bh != 0) {
      const uint32x4_t v2_0 = vld1q_u32(i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      // Two leftover rows: 64-bit stores, then shift the high halves down.
      if (bh & 2) {
        uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 3:
            vst1_u32(oN, v3_low); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            vst1_u32(oN, v2_low); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            vst1_u32(oN, v1_low);
            /* fallthrough */
          case 0:
            vst1_u32(o, v0_low); o += 2;
            break;
          default:
            XNN_UNREACHABLE;
        }
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      // One leftover row: single-lane stores.
      if (bh & 1) {
        uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 3:
            vst1_lane_u32(oN, v3_low, 0); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            vst1_lane_u32(oN, v2_low, 0); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            vst1_lane_u32(oN, v1_low, 0);
            /* fallthrough */
          case 0:
            vst1_lane_u32(o, v0_low, 0);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,377
| 35.585034
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using NEON zip (vzipq_u32) shuffles.  "reuse": a single input pointer
// walks the rows; "dec": the output pointer jumps to the last valid row and
// is walked back by conditional decrements.  o starts biased by -tile_hbytes
// so oN_offset can include the per-tile column advance.  Strides are in
// bytes.  Loads may overrun the block edge (XNN_OOB_READS); stores never do.
void xnn_x32_transposec_ukernel__4x4_reuse_dec_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointer to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Strip advance for o; the extra -tile_hbytes keeps the bias in place.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row in this strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_1 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_2 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_3 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // Two rounds of zips complete the 4x4 in-register transpose.
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      // Jump to the last valid row, then store rows last-to-first; the
      // decrement is skipped for rows beyond block_width so narrow blocks
      // overwrite the same (valid) location instead of underflowing.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      vst1q_u32(o, v0_1.val[1]);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_1.val[0]);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u32(o, v0_0.val[0]);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    // 1..3 leftover rows: aliased row pointers keep loads inside the block;
    // the zero vector v2_3 stands in for the absent fourth row.
    if (bh != 0) {
      const uint32x4_t v2_0 = vld1q_u32(i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      // Two leftover rows: 64-bit stores, then shift the high halves down.
      if (bh & 2) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_u32(o, v3_low);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v2_low);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v1_low);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u32(o, v0_low); o += 2;
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      // One leftover row: single-lane stores.
      if (bh & 1) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u32(o, v3_low, 0);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v2_low, 0);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v1_low, 0);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32(o, v0_low, 0);
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,332
| 36.034722
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-mov-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using SSE2 unpacklo/unpackhi shuffles.  "reuse": a single input
// pointer walks the rows; "mov": after jumping to the last valid row, each
// earlier row is reached via a conditional pointer move (o = oN), so narrow
// blocks re-store to the same valid location instead of underflowing.
// Strides are in bytes.  Loads may overrun the block edge (XNN_OOB_READS);
// stores never do.
void xnn_x32_transposec_ukernel__4x4_reuse_mov_sse2(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointer to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Strip advance for o; the extra -tile_hbytes keeps the bias in place.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  // o is biased by -tile_hbytes so oN_offset can include the column advance.
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row in this strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // 32-bit then 64-bit unpacks complete the 4x4 in-register transpose.
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Store rows last-to-first, conditionally stepping back one row.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      _mm_storeu_si128((__m128i*) o, v0_3);
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_2);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_1);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_0);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    // 1..3 leftover rows: aliased row pointers keep loads inside the block.
    // v2_3 is undefined, but with bh < 4 its lanes are never stored (only
    // the first bh elements of each output row are written below).
    if (bh != 0) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_undefined_si128();
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Two leftover rows: 64-bit stores, then shift the high halves down.
      if (bh & 2) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        _mm_storel_epi64((__m128i*) o, v0_3);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_2);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_1);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_0);
        o += 2;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
      }
      // One leftover row: single 32-bit stores (unaligned-safe).
      if (bh & 1) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_3));
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_2));
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_1));
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 6,168
| 36.162651
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-mov-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using WAsm SIMD v32x4 shuffles.  "reuse": a single input pointer
// walks the rows; "mov": after jumping to the last valid row, each earlier
// row is reached via a conditional pointer move (o = oN), so narrow blocks
// re-store to the same valid location instead of underflowing.  Strides are
// in bytes.  Loads may overrun the block edge (XNN_OOB_READS); stores never
// do.
void xnn_x32_transposec_ukernel__4x4_reuse_mov_wasmsimd(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointer to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Strip advance for o; the extra -tile_hbytes keeps the bias in place.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  // o is biased by -tile_hbytes so oN_offset can include the column advance.
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row in this strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const v128_t v2_0 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_1 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_2 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_3 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // Two rounds of interleaving shuffles complete the 4x4 transpose.
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      // Store rows last-to-first, conditionally stepping back one row.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      wasm_v128_store(o, v0_3);
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_2);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_1);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      wasm_v128_store(o, v0_0);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    // 1..3 leftover rows: aliased row pointers keep loads inside the block;
    // the zero vector v2_3 stands in for the absent fourth row.
    if (bh != 0) {
      const v128_t v2_0 = wasm_v128_load(i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v2_1 = wasm_v128_load(i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const v128_t v2_2 = wasm_v128_load(i2);
      const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      // Two leftover rows: 64-bit lane stores, then shift high halves down.
      if (bh & 2) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        wasm_v128_store64_lane(o, v0_3, 0);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_2, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_1, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        wasm_v128_store64_lane(o, v0_0, 0);
        o += 2;
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
      }
      // One leftover row: single 32-bit lane stores.
      if (bh & 1) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        wasm_v128_store32_lane(o, v0_3, 0);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_2, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_1, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        wasm_v128_store32_lane(o, v0_0, 0);
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 6,061
| 36.8875
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using NEON zip (vzipq_u32) shuffles.  "reuse": a single input
// pointer walks the rows; "mov": after jumping to the last valid row, each
// earlier row is reached via a conditional pointer move (o = oN), so narrow
// blocks re-store to the same valid location instead of underflowing.
// Strides are in bytes.  Loads may overrun the block edge (XNN_OOB_READS);
// stores never do.
void xnn_x32_transposec_ukernel__4x4_reuse_mov_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointer to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Strip advance for o; the extra -tile_hbytes keeps the bias in place.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t) - tile_hbytes;
  const uint32_t* i0 = input;
  // o is biased by -tile_hbytes so oN_offset can include the column advance.
  uint32_t* o = (uint32_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = index (0..3) of the last valid output row in this strip.
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_1 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_2 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_3 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // Two rounds of zips complete the 4x4 in-register transpose.
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      // Store rows last-to-first, conditionally stepping back one row.
      o = (uint32_t*) ((uintptr_t) o + oN_offset);
      vst1q_u32(o, v0_1.val[1]);
      uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      vst1q_u32(o, v0_1.val[0]);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      vst1q_u32(o, v0_0.val[1]);
      oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      vst1q_u32(o, v0_0.val[0]);
    }
    o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
    // 1..3 leftover rows: aliased row pointers keep loads inside the block;
    // the zero vector v2_3 stands in for the absent fourth row.
    if (bh != 0) {
      const uint32x4_t v2_0 = vld1q_u32(i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      // Two leftover rows: 64-bit stores, then shift the high halves down.
      if (bh & 2) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_u32(o, v3_low);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        vst1_u32(o, v2_low);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        vst1_u32(o, v1_low);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        vst1_u32(o, v0_low); o += 2;
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      // One leftover row: single-lane stores.
      if (bh & 1) {
        o = (uint32_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u32(o, v3_low, 0);
        uint32_t *oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        vst1_lane_u32(o, v2_low, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        vst1_lane_u32(o, v1_low, 0);
        oN = (uint32_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        vst1_lane_u32(o, v0_low, 0);
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o = (uint32_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,509
| 35.013072
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-multi-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements in 4x4
// tiles using SSE2 unpacklo/unpackhi shuffles.  "reuse": a single input
// pointer walks the rows; "multi": four persistent output row pointers
// (o0..o3), with out-of-range ones aliased to o0 so narrow blocks stay in
// bounds.  Strides are in bytes.  Loads may overrun the block edge
// (XNN_OOB_READS); stores never do.
void xnn_x32_transposec_ukernel__4x4_reuse_multi_sse2(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // Rewinds the input pointer to the top of the next 4-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Moves each output pointer past this strip's 4 rows.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Alias output pointers beyond block_width to o0; their stores become
    // harmless duplicates of row 0 (o0 is always stored last).
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    // Full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // 32-bit then 64-bit unpacks complete the 4x4 in-register transpose.
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Store rows last-to-first so aliased pointers end with row 0's data.
      _mm_storeu_si128((__m128i*) o3, v0_3);
      o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o2, v0_2);
      o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o1, v0_1);
      o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o0, v0_0);
      o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    // 1..3 leftover rows: aliased row pointers keep loads inside the block.
    // v2_3 is undefined, but with bh < 4 its lanes are never stored (only
    // the first bh elements of each output row are written below).
    if (bh != 0) {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_undefined_si128();
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
      // Two leftover rows: 64-bit stores, then shift the high halves down.
      if (bh & 2) {
        _mm_storel_epi64((__m128i*) o3, v0_3);
        o3 += 2;
        _mm_storel_epi64((__m128i*) o2, v0_2);
        o2 += 2;
        _mm_storel_epi64((__m128i*) o1, v0_1);
        o1 += 2;
        _mm_storel_epi64((__m128i*) o0, v0_0);
        o0 += 2;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
      }
      // One leftover row: single 32-bit stores (unaligned-safe).
      if (bh & 1) {
        unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v0_3));
        unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v0_2));
        unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v0_1));
        unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v0_0));
      }
    }
    // Advance to the next 4-column strip of the input/output.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,280
| 35.673611
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-multi-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements using
// WAsm SIMD, processing 4x4 tiles. "reuse" variant: a single input pointer
// i0 is re-advanced by input_stride between row loads; "multi" variant:
// four independent output row pointers o0..o3.
void xnn_x32_transposec_ukernel__4x4_reuse_multi_wasmsimd(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // After a full column of tiles is consumed, rewind the input pointer back
  // to the top of the block and over by one tile width.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Advance the output pointers down to the next group of tile_width rows.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Clamp out-of-range output rows onto o0. Stores below are issued in
    // o3..o0 order, so the final store through o0 wins and the clamped
    // duplicate stores are harmless.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    // Main loop: full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const v128_t v2_0 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_1 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_2 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v2_3 = wasm_v128_load(i0);
      i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // Two rounds of 32-bit interleaving shuffles implement the 4x4
      // transpose: v0_k holds column k of the loaded tile.
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      wasm_v128_store(o3, v0_3);
      o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      wasm_v128_store(o2, v0_2);
      o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      wasm_v128_store(o1, v0_1);
      o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      wasm_v128_store(o0, v0_0);
      o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    // Tail: 1-3 leftover rows.
    if (bh != 0) {
      const v128_t v2_0 = wasm_v128_load(i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      // Clamp row pointers past the last valid row so no out-of-bounds row
      // is read; the duplicated lanes are discarded by the partial stores.
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v2_1 = wasm_v128_load(i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const v128_t v2_2 = wasm_v128_load(i2);
      // The 4th row never exists in the tail; zero it (lanes are unused).
      const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
      const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
      const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
      const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
      const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
      v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
      v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
      v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
      v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
      if (bh & 2) {
        // Store the first two elements of each transposed row, then shift
        // the upper 64 bits down for the possible odd element below.
        wasm_v128_store64_lane(o3, v0_3, 0);
        o3 += 2;
        wasm_v128_store64_lane(o2, v0_2, 0);
        o2 += 2;
        wasm_v128_store64_lane(o1, v0_1, 0);
        o1 += 2;
        wasm_v128_store64_lane(o0, v0_0, 0);
        o0 += 2;
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
      }
      if (bh & 1) {
        wasm_v128_store32_lane(o3, v0_3, 0);
        wasm_v128_store32_lane(o2, v0_2, 0);
        wasm_v128_store32_lane(o1, v0_1, 0);
        wasm_v128_store32_lane(o0, v0_0, 0);
      }
    }
    // Move to the next column of tiles.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,173
| 36.492754
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height matrix of 32-bit elements using
// NEON vzip instructions, processing 4x4 tiles. "reuse" variant: a single
// input pointer i0 is re-advanced by input_stride between row loads;
// "multi" variant: four independent output row pointers o0..o3.
void xnn_x32_transposec_ukernel__4x4_reuse_multi_zip_neon(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint32_t));
  assert(input_stride >= block_width * sizeof(uint32_t));
  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(uint32_t);
  const size_t tile_wbytes = tile_width * sizeof(uint32_t);
  // After a full column of tiles, rewind the input to the top of the block
  // and over by one tile width.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Advance the output pointers down to the next group of tile_width rows.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
  const uint32_t* i0 = input;
  uint32_t* o0 = (uint32_t*) output;
  uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride);
  uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride);
  uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride);
  do {
    // Clamp out-of-range output rows onto o0. Stores are issued in o3..o0
    // order, so the final o0 store wins and duplicates are harmless.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    size_t bh = block_height;
    // Main loop: full 4x4 tiles.
    for (; bh >= 4; bh -= 4) {
      const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_1 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_2 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      const uint32x4_t v2_3 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
      // Two rounds of 32-bit zips implement the 4x4 transpose:
      // v0_0.val[0] is column 0, v0_0.val[1] column 1, v0_1.val[0]
      // column 2, v0_1.val[1] column 3.
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      vst1q_u32(o3, v0_1.val[1]); o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes);
      vst1q_u32(o2, v0_1.val[0]); o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes);
      vst1q_u32(o1, v0_0.val[1]); o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes);
      vst1q_u32(o0, v0_0.val[0]); o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    // Tail: 1-3 leftover rows.
    if (bh != 0) {
      const uint32x4_t v2_0 = vld1q_u32(i0);
      const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
      // Clamp row pointers past the last valid row to avoid out-of-bounds
      // reads; the duplicated lanes are discarded by the partial stores.
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint32x4_t v2_1 = vld1q_u32(i1);
      const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const uint32x4_t v2_2 = vld1q_u32(i2);
      // The 4th row never exists in the tail; zero it (lanes are unused).
      const uint32x4_t v2_3 = vmovq_n_u32(0);
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
      uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
      uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
      uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
      uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
      if (bh & 2) {
        // Store the first two elements of each transposed row, then move
        // the high halves down for the possible odd element below.
        vst1_u32(o3, v3_low); o3 += 2;
        vst1_u32(o2, v2_low); o2 += 2;
        vst1_u32(o1, v1_low); o1 += 2;
        vst1_u32(o0, v0_low); o0 += 2;
        v0_low = vget_high_u32(v0_0.val[0]);
        v1_low = vget_high_u32(v0_0.val[1]);
        v2_low = vget_high_u32(v0_1.val[0]);
        v3_low = vget_high_u32(v0_1.val[1]);
      }
      if (bh & 1) {
        vst1_lane_u32(o3, v3_low, 0);
        vst1_lane_u32(o2, v2_low, 0);
        vst1_lane_u32(o1, v1_low, 0);
        vst1_lane_u32(o0, v0_low, 0);
      }
    }
    // Move to the next column of tiles.
    i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint32_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint32_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint32_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint32_t*) ((uintptr_t) o3 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 4,573
| 35.887097
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-switch-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x32_transposec_ukernel__4x4_reuse_switch_sse2(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
uint32_t* o = (uint32_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storeu_si128((__m128i*) oN, v0_3);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storeu_si128((__m128i*) oN, v0_2);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storeu_si128((__m128i*) oN, v0_1);
case 0:
_mm_storeu_si128((__m128i*) o, v0_0);
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);
const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
const __m128i v2_3 = _mm_undefined_si128();
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_1);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_1);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_2, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_2, v2_3);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);
if (bh & 2) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
_mm_storel_epi64((__m128i*) oN, v0_3);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storel_epi64((__m128i*) oN, v0_2);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storel_epi64((__m128i*) oN, v0_1);
case 0:
_mm_storel_epi64((__m128i*) o, v0_0);
break;
default:
XNN_UNREACHABLE;
}
o += 2;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
}
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_3));
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_2));
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_1));
case 0:
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,810
| 34.87037
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-switch-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_reuse_switch_wasmsimd(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
uint32_t* o = (uint32_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const v128_t v2_0 = wasm_v128_load(i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const v128_t v2_1 = wasm_v128_load(i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const v128_t v2_2 = wasm_v128_load(i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const v128_t v2_3 = wasm_v128_load(i0);
i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
const v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
const v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
const v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store(oN, v0_3);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store(oN, v0_2);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store(oN, v0_1);
case 0:
wasm_v128_store(o, v0_0);
o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const v128_t v2_0 = wasm_v128_load(i0);
const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v2_1 = wasm_v128_load(i1);
const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const v128_t v2_2 = wasm_v128_load(i2);
const v128_t v2_3 = wasm_v128_xor(v2_0, v2_0);
const v128_t v1_0 = wasm_v32x4_shuffle(v2_0, v2_2, 0, 4, 1, 5);
const v128_t v1_1 = wasm_v32x4_shuffle(v2_0, v2_2, 2, 6, 3, 7);
const v128_t v1_2 = wasm_v32x4_shuffle(v2_1, v2_3, 0, 4, 1, 5);
const v128_t v1_3 = wasm_v32x4_shuffle(v2_1, v2_3, 2, 6, 3, 7);
v128_t v0_0 = wasm_v32x4_shuffle(v1_0, v1_2, 0, 4, 1, 5);
v128_t v0_1 = wasm_v32x4_shuffle(v1_0, v1_2, 2, 6, 3, 7);
v128_t v0_2 = wasm_v32x4_shuffle(v1_1, v1_3, 0, 4, 1, 5);
v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7);
if (bh & 2) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store64_lane(oN, v0_3, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store64_lane(oN, v0_2, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store64_lane(oN, v0_1, 0);
case 0:
wasm_v128_store64_lane(o, v0_0, 0);
o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
}
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
wasm_v128_store32_lane(oN, v0_3, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store32_lane(oN, v0_2, 0);
oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store32_lane(oN, v0_1, 0);
case 0:
wasm_v128_store32_lane(o, v0_0, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,707
| 35.589744
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-reuse-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_reuse_switch_zip_neon(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint32_t);
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const uint32_t* i0 = input;
uint32_t* o = (uint32_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x4_t v2_1 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x4_t v2_2 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x4_t v2_3 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);
const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
uint32_t *oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1q_u32(oN, v0_1.val[1]); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1q_u32(oN, v0_1.val[0]); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1q_u32(oN, v0_0.val[1]);
case 0:
vst1q_u32(o, v0_0.val[0]); o = (uint32_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint32x4_t v2_0 = vld1q_u32(i0);
const uint32_t *i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint32x4_t v2_1 = vld1q_u32(i1);
const uint32_t *i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint32x4_t v2_2 = vld1q_u32(i2);
const uint32x4_t v2_3 = vmovq_n_u32(0);
const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);
const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);
uint32x2_t v0_low = vget_low_u32(v0_0.val[0]);
uint32x2_t v1_low = vget_low_u32(v0_0.val[1]);
uint32x2_t v2_low = vget_low_u32(v0_1.val[0]);
uint32x2_t v3_low = vget_low_u32(v0_1.val[1]);
if (bh & 2) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_u32(oN, v3_low); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u32(oN, v2_low); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u32(oN, v1_low);
case 0:
vst1_u32(o, v0_low); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vget_high_u32(v0_0.val[0]);
v1_low = vget_high_u32(v0_0.val[1]);
v2_low = vget_high_u32(v0_1.val[0]);
v3_low = vget_high_u32(v0_1.val[1]);
}
if (bh & 1) {
uint32_t* oN = (uint32_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_lane_u32(oN, v3_low, 0); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32(oN, v2_low, 0); oN = (uint32_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32(oN, v1_low, 0);
case 0:
vst1_lane_u32(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset);
o = (uint32_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,073
| 34.732394
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_scalar_float(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(float);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);
const size_t input_offset = tile_height * input_stride;
const float* i0 = (const float*) input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o0 = (float*) output;
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o3++ = i0[3];
*o3++ = i1[3];
*o3++ = i2[3];
*o3++ = i3[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o2++ = i2[2];
*o2++ = i3[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i = i0;
if (bh & 2) {
o3[0] = i0[3];
o3[1] = i1[3];
o3 += 2;
o2[0] = i0[2];
o2[1] = i1[2];
o2 += 2;
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o3[0] = i[3];
o2[0] = i[2];
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
i1 = (const float*) ((uintptr_t) i0 + input_stride);
i2 = (const float*) ((uintptr_t) i1 + input_stride);
i3 = (const float*) ((uintptr_t) i2 + input_stride);
o0 = (float*) ((uintptr_t) o0 + output_reset);
o1 = (float*) ((uintptr_t) o1 + output_reset);
o2 = (float*) ((uintptr_t) o2 + output_reset);
o3 = (float*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 3,354
| 28.690265
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-4x4-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_scalar_int(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int));
assert(input_stride >= block_width * sizeof(int));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int);
const size_t input_offset = tile_height * input_stride;
const int* i0 = (const int*) input;
const int* i1 = (const int*) ((uintptr_t) i0 + input_stride);
const int* i2 = (const int*) ((uintptr_t) i1 + input_stride);
const int* i3 = (const int*) ((uintptr_t) i2 + input_stride);
int* o0 = (int*) output;
int* o1 = (int*) ((uintptr_t) o0 + output_stride);
int* o2 = (int*) ((uintptr_t) o1 + output_stride);
int* o3 = (int*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o3++ = i0[3];
*o3++ = i1[3];
*o3++ = i2[3];
*o3++ = i3[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o2++ = i2[2];
*o2++ = i3[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int*) ((uintptr_t) i0 + input_offset);
i1 = (const int*) ((uintptr_t) i1 + input_offset);
i2 = (const int*) ((uintptr_t) i2 + input_offset);
i3 = (const int*) ((uintptr_t) i3 + input_offset);
}
const int* i = i0;
if (bh & 2) {
o3[0] = i0[3];
o3[1] = i1[3];
o3 += 2;
o2[0] = i0[2];
o2[1] = i1[2];
o2 += 2;
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o3[0] = i[3];
o2[0] = i[2];
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const int*) ((uintptr_t) i0 + input_reset);
i1 = (const int*) ((uintptr_t) i0 + input_stride);
i2 = (const int*) ((uintptr_t) i1 + input_stride);
i3 = (const int*) ((uintptr_t) i2 + input_stride);
o0 = (int*) ((uintptr_t) o0 + output_reset);
o1 = (int*) ((uintptr_t) o1 + output_reset);
o2 = (int*) ((uintptr_t) o2 + output_reset);
o3 = (int*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 3,286
| 28.088496
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-8x8-multi-mov-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x32_transposec_ukernel__8x8_multi_mov_avx(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(float);
const size_t tile_wbytes = tile_width * sizeof(float);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float) - tile_hbytes;
const float* i0 = (const float*) input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const float* i7 = (const float*) ((uintptr_t) i6 + input_stride);
float* o = (float*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[rem ^ 7]));
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_offset);
const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
i1 = (float*) ((uintptr_t) i1 + input_offset);
const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
i2 = (float*) ((uintptr_t) i2 + input_offset);
const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
i3 = (float*) ((uintptr_t) i3 + input_offset);
const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
i4 = (float*) ((uintptr_t) i4 + input_offset);
const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
i5 = (float*) ((uintptr_t) i5 + input_offset);
const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
i6 = (float*) ((uintptr_t) i6 + input_offset);
const __m256 v3_7 = _mm256_maskload_ps(i7, vmask);
i7 = (float*) ((uintptr_t) i7 + input_offset);
const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
const __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
const __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
const __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
const __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
const __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
const __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
const __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
const __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
o = (float*) ((uintptr_t) o + oN_offset);
_mm256_storeu_ps(o, v0_7);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm256_storeu_ps(o, v0_6);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm256_storeu_ps(o, v0_5);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm256_storeu_ps(o, v0_4);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm256_storeu_ps(o, v0_3);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm256_storeu_ps(o, v0_2);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm256_storeu_ps(o, v0_1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm256_storeu_ps(o, v0_0);
}
o = (float*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
const __m256 v3_7 = _mm256_undefined_ps();
const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
__m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
__m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
__m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
__m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
__m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
__m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
__m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
__m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
__m128 v0_0_lo = _mm256_castps256_ps128(v0_0);
__m128 v0_1_lo = _mm256_castps256_ps128(v0_1);
__m128 v0_2_lo = _mm256_castps256_ps128(v0_2);
__m128 v0_3_lo = _mm256_castps256_ps128(v0_3);
__m128 v0_4_lo = _mm256_castps256_ps128(v0_4);
__m128 v0_5_lo = _mm256_castps256_ps128(v0_5);
__m128 v0_6_lo = _mm256_castps256_ps128(v0_6);
__m128 v0_7_lo = _mm256_castps256_ps128(v0_7);
if (bh & 4) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_storeu_ps(o, v0_7_lo);
v0_7_lo = _mm256_extractf128_ps(v0_7, 1);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storeu_ps(o, v0_6_lo);
v0_6_lo = _mm256_extractf128_ps(v0_6, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storeu_ps(o, v0_5_lo);
v0_5_lo = _mm256_extractf128_ps(v0_5, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storeu_ps(o, v0_4_lo);
v0_4_lo = _mm256_extractf128_ps(v0_4, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storeu_ps(o, v0_3_lo);
v0_3_lo = _mm256_extractf128_ps(v0_3, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storeu_ps(o, v0_2_lo);
v0_2_lo = _mm256_extractf128_ps(v0_2, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storeu_ps(o, v0_1_lo);
v0_1_lo = _mm256_extractf128_ps(v0_1, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storeu_ps(o, v0_0_lo);
v0_0_lo = _mm256_extractf128_ps(v0_0, 1);
o += 4;
}
if (bh & 2) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_storel_pi((__m64*) o, v0_7_lo);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_6_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_5_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_4_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_3_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_2_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_1_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_0_lo);
o += 2;
v0_0_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_0_lo), _mm_castps_pd(v0_0_lo)));
v0_1_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_1_lo), _mm_castps_pd(v0_1_lo)));
v0_2_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_2_lo), _mm_castps_pd(v0_2_lo)));
v0_3_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_3_lo), _mm_castps_pd(v0_3_lo)));
v0_4_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_4_lo), _mm_castps_pd(v0_4_lo)));
v0_5_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_5_lo), _mm_castps_pd(v0_5_lo)));
v0_6_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_6_lo), _mm_castps_pd(v0_6_lo)));
v0_7_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_7_lo), _mm_castps_pd(v0_7_lo)));
}
if (bh & 1) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_store_ss(o, v0_7_lo);
float* oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_store_ss(o, v0_6_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_store_ss(o, v0_5_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_store_ss(o, v0_4_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_store_ss(o, v0_3_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_store_ss(o, v0_2_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_store_ss(o, v0_1_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_store_ss(o, v0_0_lo);
}
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
i1 = (const float*) ((uintptr_t) i0 + input_stride);
i2 = (const float*) ((uintptr_t) i1 + input_stride);
i3 = (const float*) ((uintptr_t) i2 + input_stride);
i4 = (const float*) ((uintptr_t) i3 + input_stride);
i5 = (const float*) ((uintptr_t) i4 + input_stride);
i6 = (const float*) ((uintptr_t) i5 + input_stride);
i7 = (const float*) ((uintptr_t) i6 + input_stride);
o = (float*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 14,614
| 40.169014
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-8x8-multi-switch-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 32-bit elements in
// 8x8 tiles using AVX. "multi" variant: keeps one input pointer per source
// row; "switch" variant: selects the output stores via a switch on the
// number of valid output rows, with intentional case fall-through.
//
// Fix vs. previous revision: the mask-table address expression had its
// `&params` prefix corrupted by an `&para;` entity mis-encoding (`¶ms`),
// which broke compilation; the address-of expression is restored below.
void xnn_x32_transposec_ukernel__8x8_multi_switch_avx(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(float);
  const size_t tile_wbytes = tile_width * sizeof(float);
  // After finishing one 8-wide column of tiles, rewind the input back up the
  // rows consumed (multiples of the tile height) and over to the next column.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Advance the output to the start of the next 8 output rows, undoing the
  // horizontal progress made while storing this column.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);

  // One read pointer per source row of the current tile.
  const float* i0 = (const float*) input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_stride);
  float* o = (float*) output;
  const size_t minus_output_stride = -output_stride;

  do {
    // rem = number of valid output rows beyond the first for this column (0..7).
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    // Load-mask keeping the first rem+1 lanes of each source row; lanes past
    // the block edge are zeroed by the masked load.
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 7]));
    size_t bh = block_height;
    for (; bh >= 8; bh -= 8) {
      // Masked-load one 8-float row from each source row, advancing each
      // pointer one full tile down.
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_offset);
      const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
      i1 = (float*) ((uintptr_t) i1 + input_offset);
      const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
      i2 = (float*) ((uintptr_t) i2 + input_offset);
      const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
      i3 = (float*) ((uintptr_t) i3 + input_offset);
      const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
      i4 = (float*) ((uintptr_t) i4 + input_offset);
      const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
      i5 = (float*) ((uintptr_t) i5 + input_offset);
      const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
      i6 = (float*) ((uintptr_t) i6 + input_offset);
      const __m256 v3_7 = _mm256_maskload_ps(i7, vmask);
      i7 = (float*) ((uintptr_t) i7 + input_offset);
      // 8x8 transpose, stage 1: interleave rows two apart within 128-bit lanes.
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      // Stage 2: interleave again to gather full columns within each lane.
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      float* oN = (float*) ((uintptr_t) o + oN_stride);
      // Stage 3 (128-bit lane recombination) is fused into the stores below.
      // Cases fall through on purpose: store output row `rem` first, then walk
      // up to row 0.
      switch (rem) {
        default:
          XNN_UNREACHABLE;
        case 7: {
          const __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
          _mm256_storeu_ps(oN, v0_7);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 6: {
          const __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
          _mm256_storeu_ps(oN, v0_6);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 5: {
          const __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
          _mm256_storeu_ps(oN, v0_5);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 4: {
          const __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
          _mm256_storeu_ps(oN, v0_4);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 3: {
          const __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
          _mm256_storeu_ps(oN, v0_3);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 2: {
          const __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
          _mm256_storeu_ps(oN, v0_2);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 1: {
          const __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
          _mm256_storeu_ps(oN, v0_1);
        }  /* fallthrough */
        case 0: {
          const __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
          _mm256_storeu_ps(o, v0_0);
          o = (float*) ((uintptr_t) o + tile_hbytes);
        }
      }
    }
    // Tail: 1..7 rows left. Duplicate i0 into any row pointer past the valid
    // height so the masked loads stay in bounds; the extra lanes are unused.
    if (bh != 0) {
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i0;
      }
      const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i0;
      }
      const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i0;
      }
      const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i0;
      }
      const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
      const __m256 v3_7 = _mm256_undefined_ps();  // row 7 never valid in the tail
      // Same three-stage 8x8 transpose as the main loop.
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
      __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
      __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
      __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
      __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
      __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
      __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
      __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
      // Low 128-bit halves, consumed 4/2/1 elements at a time below.
      __m128 v0_0_lo = _mm256_castps256_ps128(v0_0);
      __m128 v0_1_lo = _mm256_castps256_ps128(v0_1);
      __m128 v0_2_lo = _mm256_castps256_ps128(v0_2);
      __m128 v0_3_lo = _mm256_castps256_ps128(v0_3);
      __m128 v0_4_lo = _mm256_castps256_ps128(v0_4);
      __m128 v0_5_lo = _mm256_castps256_ps128(v0_5);
      __m128 v0_6_lo = _mm256_castps256_ps128(v0_6);
      __m128 v0_7_lo = _mm256_castps256_ps128(v0_7);
      // Store 4 remaining elements per output row, then refill _lo with the
      // upper 128-bit half for the 2-/1-element stages.
      if (bh & 4) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_storeu_ps(oN, v0_7_lo);
            v0_7_lo = _mm256_extractf128_ps(v0_7, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_storeu_ps(oN, v0_6_lo);
            v0_6_lo = _mm256_extractf128_ps(v0_6, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_storeu_ps(oN, v0_5_lo);
            v0_5_lo = _mm256_extractf128_ps(v0_5, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_storeu_ps(oN, v0_4_lo);
            v0_4_lo = _mm256_extractf128_ps(v0_4, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_storeu_ps(oN, v0_3_lo);
            v0_3_lo = _mm256_extractf128_ps(v0_3, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_storeu_ps(oN, v0_2_lo);
            v0_2_lo = _mm256_extractf128_ps(v0_2, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_storeu_ps(oN, v0_1_lo);
            v0_1_lo = _mm256_extractf128_ps(v0_1, 1);
            /* fallthrough */
          case 0:
            _mm_storeu_ps(o, v0_0_lo);
            v0_0_lo = _mm256_extractf128_ps(v0_0, 1);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 4;
      }
      // Store 2 elements per output row, then shift the upper 64 bits of each
      // _lo register down for the final 1-element stage.
      if (bh & 2) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_storel_pi((__m64*) oN, v0_7_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_storel_pi((__m64*) oN, v0_6_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_storel_pi((__m64*) oN, v0_5_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_storel_pi((__m64*) oN, v0_4_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_storel_pi((__m64*) oN, v0_3_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_storel_pi((__m64*) oN, v0_2_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_storel_pi((__m64*) oN, v0_1_lo);
            /* fallthrough */
          case 0:
            _mm_storel_pi((__m64*) o, v0_0_lo);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 2;
        v0_0_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_0_lo), _mm_castps_pd(v0_0_lo)));
        v0_1_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_1_lo), _mm_castps_pd(v0_1_lo)));
        v0_2_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_2_lo), _mm_castps_pd(v0_2_lo)))
        ;
        v0_3_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_3_lo), _mm_castps_pd(v0_3_lo)));
        v0_4_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_4_lo), _mm_castps_pd(v0_4_lo)));
        v0_5_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_5_lo), _mm_castps_pd(v0_5_lo)));
        v0_6_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_6_lo), _mm_castps_pd(v0_6_lo)));
        v0_7_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_7_lo), _mm_castps_pd(v0_7_lo)));
      }
      // Store the final single element per output row.
      if (bh & 1) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_store_ss(oN, v0_7_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_store_ss(oN, v0_6_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_store_ss(oN, v0_5_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_store_ss(oN, v0_4_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_store_ss(oN, v0_3_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_store_ss(oN, v0_2_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_store_ss(oN, v0_1_lo);
            /* fallthrough */
          case 0:
            _mm_store_ss(o, v0_0_lo);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    // Move to the next 8-wide column of the block and re-derive row pointers.
    i0 = (const float*) ((uintptr_t) i0 + input_reset);
    i1 = (const float*) ((uintptr_t) i0 + input_stride);
    i2 = (const float*) ((uintptr_t) i1 + input_stride);
    i3 = (const float*) ((uintptr_t) i2 + input_stride);
    i4 = (const float*) ((uintptr_t) i3 + input_stride);
    i5 = (const float*) ((uintptr_t) i4 + input_stride);
    i6 = (const float*) ((uintptr_t) i5 + input_stride);
    i7 = (const float*) ((uintptr_t) i6 + input_stride);
    o = (float*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);  // saturating subtract
  } while (block_width != 0);
}
| 13,567
| 40.747692
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-8x8-reuse-mov-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x32_transposec_ukernel__8x8_reuse_mov_avx(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(float));
assert(input_stride >= block_width * sizeof(float));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(float);
const size_t tile_wbytes = tile_width * sizeof(float);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float) - tile_hbytes;
const float* i0 = (const float*) input;
float* o = (float*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[rem ^ 7]));
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_1 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_2 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_3 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_4 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_5 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_6 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v3_7 = _mm256_maskload_ps(i0, vmask);
i0 = (float*) ((uintptr_t) i0 + input_stride);
const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
const __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
const __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
const __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
const __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
const __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
const __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
const __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
const __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
o = (float*) ((uintptr_t) o + oN_offset);
_mm256_storeu_ps(o, v0_7);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm256_storeu_ps(o, v0_6);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm256_storeu_ps(o, v0_5);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm256_storeu_ps(o, v0_4);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm256_storeu_ps(o, v0_3);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm256_storeu_ps(o, v0_2);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm256_storeu_ps(o, v0_1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm256_storeu_ps(o, v0_0);
}
o = (float*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
const float *i1 = (const float*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
const float *i2 = (const float*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
const float *i3 = (const float*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
const float *i4 = (const float*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
const float *i5 = (const float*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
const float *i6 = (const float*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
const __m256 v3_7 = _mm256_undefined_ps();
const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
__m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
__m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
__m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
__m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
__m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
__m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
__m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
__m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
__m128 v0_0_lo = _mm256_castps256_ps128(v0_0);
__m128 v0_1_lo = _mm256_castps256_ps128(v0_1);
__m128 v0_2_lo = _mm256_castps256_ps128(v0_2);
__m128 v0_3_lo = _mm256_castps256_ps128(v0_3);
__m128 v0_4_lo = _mm256_castps256_ps128(v0_4);
__m128 v0_5_lo = _mm256_castps256_ps128(v0_5);
__m128 v0_6_lo = _mm256_castps256_ps128(v0_6);
__m128 v0_7_lo = _mm256_castps256_ps128(v0_7);
if (bh & 4) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_storeu_ps(o, v0_7_lo);
v0_7_lo = _mm256_extractf128_ps(v0_7, 1);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storeu_ps(o, v0_6_lo);
v0_6_lo = _mm256_extractf128_ps(v0_6, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storeu_ps(o, v0_5_lo);
v0_5_lo = _mm256_extractf128_ps(v0_5, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storeu_ps(o, v0_4_lo);
v0_4_lo = _mm256_extractf128_ps(v0_4, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storeu_ps(o, v0_3_lo);
v0_3_lo = _mm256_extractf128_ps(v0_3, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storeu_ps(o, v0_2_lo);
v0_2_lo = _mm256_extractf128_ps(v0_2, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storeu_ps(o, v0_1_lo);
v0_1_lo = _mm256_extractf128_ps(v0_1, 1);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storeu_ps(o, v0_0_lo);
v0_0_lo = _mm256_extractf128_ps(v0_0, 1);
o += 4;
}
if (bh & 2) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_storel_pi((__m64*) o, v0_7_lo);
float *oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_6_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_5_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_4_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_3_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_2_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_1_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storel_pi((__m64*) o, v0_0_lo);
o += 2;
v0_0_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_0_lo), _mm_castps_pd(v0_0_lo)));
v0_1_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_1_lo), _mm_castps_pd(v0_1_lo)));
v0_2_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_2_lo), _mm_castps_pd(v0_2_lo)));
v0_3_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_3_lo), _mm_castps_pd(v0_3_lo)));
v0_4_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_4_lo), _mm_castps_pd(v0_4_lo)));
v0_5_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_5_lo), _mm_castps_pd(v0_5_lo)));
v0_6_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_6_lo), _mm_castps_pd(v0_6_lo)));
v0_7_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_7_lo), _mm_castps_pd(v0_7_lo)));
}
if (bh & 1) {
o = (float*) ((uintptr_t) o + oN_stride);
_mm_store_ss(o, v0_7_lo);
float* oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_store_ss(o, v0_6_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_store_ss(o, v0_5_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_store_ss(o, v0_4_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_store_ss(o, v0_3_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_store_ss(o, v0_2_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_store_ss(o, v0_1_lo);
oN = (float*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_store_ss(o, v0_0_lo);
}
}
i0 = (const float*) ((uintptr_t) i0 + input_reset);
o = (float*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 14,113
| 39.791908
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-8x8-reuse-multi-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements using
// 8x8 tiles and AVX shuffles.  "reuse" flavor: a single input pointer (i0)
// advanced by input_stride per row; "multi" flavor: eight independent output
// pointers (o0..o7), one per transposed output row.
void xnn_x32_transposec_ukernel__8x8_reuse_multi_avx(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(float);
  const size_t tile_wbytes = tile_width * sizeof(float);
  // Pointer rewinds applied after each 8-wide column of tiles is finished:
  // step right by one tile width, back up over the rows already consumed.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);

  const float* i0 = (const float*) input;
  float* o0 = (float*) output;

  do {
    // One output pointer per transposed row.  Rows past block_width alias an
    // earlier pointer (o0) so the unconditional stores below stay in bounds;
    // their results are simply overwritten/ignored.
    float* o1 = (float*) (block_width < 2 ? o0 : (float*) ((uintptr_t) o0 + output_stride));
    float* o2 = (float*) (block_width <= 2 ? o0 : (float*) ((uintptr_t) o1 + output_stride));
    float* o3 = (float*) (block_width < 4 ? o0 : (float*) ((uintptr_t) o2 + output_stride));
    float* o4 = (float*) (block_width <= 4 ? o0 : (float*) ((uintptr_t) o3 + output_stride));
    float* o5 = (float*) (block_width < 6 ? o0 : (float*) ((uintptr_t) o4 + output_stride));
    float* o6 = (float*) (block_width <= 6 ? o0 : (float*) ((uintptr_t) o5 + output_stride));
    float* o7 = (float*) (block_width < 8 ? o0 : (float*) ((uintptr_t) o6 + output_stride));
    // rem = number of valid columns - 1, capped at 7; indexes the shared mask
    // table so maskload only reads the valid leading lanes of each input row.
    const size_t rem = min(block_width - 1, 7);
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 7]));
    size_t bh = block_height;
    // Main loop: full 8-row input tiles.
    for (; bh >= 8; bh -= 8) {
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_1 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_2 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_3 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_4 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_5 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_6 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_7 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      // Three-stage 8x8 transpose network: two unpack stages interleave
      // 32-bit lanes, the final stage swaps 128-bit halves across registers.
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      const __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
      const __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
      const __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
      const __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
      const __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
      const __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
      const __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
      const __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
      // Store the 8 transposed rows; each output pointer advances by one
      // tile height (8 elements) per iteration.
      _mm256_storeu_ps(o7, v0_7);
      o7 = (float*) ((uintptr_t) o7 + tile_hbytes);
      _mm256_storeu_ps(o6, v0_6);
      o6 = (float*) ((uintptr_t) o6 + tile_hbytes);
      _mm256_storeu_ps(o5, v0_5);
      o5 = (float*) ((uintptr_t) o5 + tile_hbytes);
      _mm256_storeu_ps(o4, v0_4);
      o4 = (float*) ((uintptr_t) o4 + tile_hbytes);
      _mm256_storeu_ps(o3, v0_3);
      o3 = (float*) ((uintptr_t) o3 + tile_hbytes);
      _mm256_storeu_ps(o2, v0_2);
      o2 = (float*) ((uintptr_t) o2 + tile_hbytes);
      _mm256_storeu_ps(o1, v0_1);
      o1 = (float*) ((uintptr_t) o1 + tile_hbytes);
      _mm256_storeu_ps(o0, v0_0);
      o0 = (float*) ((uintptr_t) o0 + tile_hbytes);
    }
    // Remainder: fewer than 8 rows left.  Row pointers past bh clamp to the
    // previous row so every maskload stays within the input block.
    if (bh != 0) {
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      const float *i1 = (const float*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
      const float *i2 = (const float*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
      const float *i3 = (const float*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
      const float *i4 = (const float*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
      const float *i5 = (const float*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
      const float *i6 = (const float*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
      // Row 7 can never be valid here (bh < 8); its lanes are never stored.
      const __m256 v3_7 = _mm256_undefined_ps();
      // Same 3-stage transpose network as the main loop.
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
      __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
      __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
      __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
      __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
      __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
      __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
      __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
      __m128 v0_0_lo = _mm256_castps256_ps128(v0_0);
      __m128 v0_1_lo = _mm256_castps256_ps128(v0_1);
      __m128 v0_2_lo = _mm256_castps256_ps128(v0_2);
      __m128 v0_3_lo = _mm256_castps256_ps128(v0_3);
      __m128 v0_4_lo = _mm256_castps256_ps128(v0_4);
      __m128 v0_5_lo = _mm256_castps256_ps128(v0_5);
      __m128 v0_6_lo = _mm256_castps256_ps128(v0_6);
      __m128 v0_7_lo = _mm256_castps256_ps128(v0_7);
      // Store the leftover columns by halving widths: 4, then 2, then 1
      // element per output row, shifting remaining data down after each step.
      if (bh & 4) {
        _mm_storeu_ps(o7, v0_7_lo);
        v0_7_lo = _mm256_extractf128_ps(v0_7, 1);
        o7 += 4;
        _mm_storeu_ps(o6, v0_6_lo);
        v0_6_lo = _mm256_extractf128_ps(v0_6, 1);
        o6 += 4;
        _mm_storeu_ps(o5, v0_5_lo);
        v0_5_lo = _mm256_extractf128_ps(v0_5, 1);
        o5 += 4;
        _mm_storeu_ps(o4, v0_4_lo);
        v0_4_lo = _mm256_extractf128_ps(v0_4, 1);
        o4 += 4;
        _mm_storeu_ps(o3, v0_3_lo);
        v0_3_lo = _mm256_extractf128_ps(v0_3, 1);
        o3 += 4;
        _mm_storeu_ps(o2, v0_2_lo);
        v0_2_lo = _mm256_extractf128_ps(v0_2, 1);
        o2 += 4;
        _mm_storeu_ps(o1, v0_1_lo);
        v0_1_lo = _mm256_extractf128_ps(v0_1, 1);
        o1 += 4;
        _mm_storeu_ps(o0, v0_0_lo);
        v0_0_lo = _mm256_extractf128_ps(v0_0, 1);
        o0 += 4;
      }
      if (bh & 2) {
        _mm_storel_pi((__m64*) o7, v0_7_lo);
        o7 += 2;
        _mm_storel_pi((__m64*) o6, v0_6_lo);
        o6 += 2;
        _mm_storel_pi((__m64*) o5, v0_5_lo);
        o5 += 2;
        _mm_storel_pi((__m64*) o4, v0_4_lo);
        o4 += 2;
        _mm_storel_pi((__m64*) o3, v0_3_lo);
        o3 += 2;
        _mm_storel_pi((__m64*) o2, v0_2_lo);
        o2 += 2;
        _mm_storel_pi((__m64*) o1, v0_1_lo);
        o1 += 2;
        _mm_storel_pi((__m64*) o0, v0_0_lo);
        o0 += 2;
        // Shift the upper 64 bits down so the bh&1 step stores element 2.
        v0_0_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_0_lo), _mm_castps_pd(v0_0_lo)));
        v0_1_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_1_lo), _mm_castps_pd(v0_1_lo)));
        v0_2_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_2_lo), _mm_castps_pd(v0_2_lo)));
        v0_3_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_3_lo), _mm_castps_pd(v0_3_lo)));
        v0_4_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_4_lo), _mm_castps_pd(v0_4_lo)));
        v0_5_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_5_lo), _mm_castps_pd(v0_5_lo)));
        v0_6_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_6_lo), _mm_castps_pd(v0_6_lo)));
        v0_7_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_7_lo), _mm_castps_pd(v0_7_lo)))XNNP;
      }
      if (bh & 1) {
        _mm_store_ss(o7, v0_7_lo);
        _mm_store_ss(o6, v0_6_lo);
        _mm_store_ss(o5, v0_5_lo);
        _mm_store_ss(o4, v0_4_lo);
        _mm_store_ss(o3, v0_3_lo);
        _mm_store_ss(o2, v0_2_lo);
        _mm_store_ss(o1, v0_1_lo);
        _mm_store_ss(o0, v0_0_lo);
      }
    }
    // Advance to the next 8-wide column of tiles.
    i0 = (const float*) ((uintptr_t) i0 + input_reset);
    o0 = (float*) ((uintptr_t) o0 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 11,195
| 42.905882
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/gen/x32-transposec-8x8-reuse-switch-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height matrix of 32-bit elements using
// 8x8 tiles and AVX shuffles.  "reuse" flavor: a single input pointer (i0)
// advanced by input_stride per row; "switch" flavor: a single output pointer
// plus a fall-through switch on the number of valid output rows, walking
// backwards from the last row via a negative stride.
void xnn_x32_transposec_ukernel__8x8_reuse_switch_avx(
    const uint32_t* input,
    uint32_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(float));
  assert(input_stride >= block_width * sizeof(float));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(float);
  const size_t tile_wbytes = tile_width * sizeof(float);
  // Pointer rewinds applied after each 8-wide column of tiles is finished.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(float);

  const float* i0 = (const float*) input;
  float* o = (float*) output;
  // Negative stride used to step from the last output row back toward row 0.
  const size_t minus_output_stride = -output_stride;

  do {
    // rem = number of valid output rows - 1, capped at 7; selects both the
    // maskload mask and the entry point into the fall-through switches.
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 7]));
    size_t bh = block_height;
    // Main loop: full 8-row input tiles.
    for (; bh >= 8; bh -= 8) {
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_1 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_2 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_3 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_4 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_5 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_6 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      const __m256 v3_7 = _mm256_maskload_ps(i0, vmask);
      i0 = (float*) ((uintptr_t) i0 + input_stride);
      // First two stages of the 8x8 transpose network (32-bit interleaves);
      // the final 128-bit half swap is fused into the switch below.
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      // Store valid rows only: start at the last valid row (o + oN_stride)
      // and fall through toward row 0, stepping back one output row each case.
      float* oN = (float*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        default:
          XNN_UNREACHABLE;
        case 7: {
          const __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
          _mm256_storeu_ps(oN, v0_7);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 6: {
          const __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
          _mm256_storeu_ps(oN, v0_6);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 5: {
          const __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
          _mm256_storeu_ps(oN, v0_5);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 4: {
          const __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
          _mm256_storeu_ps(oN, v0_4);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 3: {
          const __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
          _mm256_storeu_ps(oN, v0_3);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 2: {
          const __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
          _mm256_storeu_ps(oN, v0_2);
          oN = (float*) ((uintptr_t) oN + minus_output_stride);
        }  /* fallthrough */
        case 1: {
          const __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
          _mm256_storeu_ps(oN, v0_1);
        }  /* fallthrough */
        case 0: {
          const __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
          _mm256_storeu_ps(o, v0_0);
          o = (float*) ((uintptr_t) o + tile_hbytes);
        }
      }
    }
    // Remainder: fewer than 8 rows left.  Row pointers past bh clamp to the
    // previous row so every maskload stays within the input block.
    if (bh != 0) {
      const __m256 v3_0 = _mm256_maskload_ps(i0, vmask);
      const float *i1 = (const float*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m256 v3_1 = _mm256_maskload_ps(i1, vmask);
      const float *i2 = (const float*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m256 v3_2 = _mm256_maskload_ps(i2, vmask);
      const float *i3 = (const float*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const __m256 v3_3 = _mm256_maskload_ps(i3, vmask);
      const float *i4 = (const float*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const __m256 v3_4 = _mm256_maskload_ps(i4, vmask);
      const float *i5 = (const float*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const __m256 v3_5 = _mm256_maskload_ps(i5, vmask);
      const float *i6 = (const float*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const __m256 v3_6 = _mm256_maskload_ps(i6, vmask);
      // Row 7 can never be valid here (bh < 8); its lanes are never stored.
      const __m256 v3_7 = _mm256_undefined_ps();
      const __m256 v2_0 = _mm256_unpacklo_ps(v3_0, v3_2);
      const __m256 v2_1 = _mm256_unpackhi_ps(v3_0, v3_2);
      const __m256 v2_2 = _mm256_unpacklo_ps(v3_1, v3_3);
      const __m256 v2_3 = _mm256_unpackhi_ps(v3_1, v3_3);
      const __m256 v2_4 = _mm256_unpacklo_ps(v3_4, v3_6);
      const __m256 v2_5 = _mm256_unpackhi_ps(v3_4, v3_6);
      const __m256 v2_6 = _mm256_unpacklo_ps(v3_5, v3_7);
      const __m256 v2_7 = _mm256_unpackhi_ps(v3_5, v3_7);
      const __m256 v1_0 = _mm256_unpacklo_ps(v2_0, v2_2);
      const __m256 v1_1 = _mm256_unpackhi_ps(v2_0, v2_2);
      const __m256 v1_2 = _mm256_unpacklo_ps(v2_1, v2_3);
      const __m256 v1_3 = _mm256_unpackhi_ps(v2_1, v2_3);
      const __m256 v1_4 = _mm256_unpacklo_ps(v2_4, v2_6);
      const __m256 v1_5 = _mm256_unpackhi_ps(v2_4, v2_6);
      const __m256 v1_6 = _mm256_unpacklo_ps(v2_5, v2_7);
      const __m256 v1_7 = _mm256_unpackhi_ps(v2_5, v2_7);
      __m256 v0_0 = _mm256_insertf128_ps(v1_0, _mm256_castps256_ps128(v1_4), 1);
      __m256 v0_4 = _mm256_permute2f128_ps(v1_0, v1_4, 0x31);
      __m256 v0_1 = _mm256_insertf128_ps(v1_1, _mm256_castps256_ps128(v1_5), 1);
      __m256 v0_5 = _mm256_permute2f128_ps(v1_1, v1_5, 0x31);
      __m256 v0_2 = _mm256_insertf128_ps(v1_2, _mm256_castps256_ps128(v1_6), 1);
      __m256 v0_6 = _mm256_permute2f128_ps(v1_2, v1_6, 0x31);
      __m256 v0_3 = _mm256_insertf128_ps(v1_3, _mm256_castps256_ps128(v1_7), 1);
      __m256 v0_7 = _mm256_permute2f128_ps(v1_3, v1_7, 0x31);
      __m128 v0_0_lo = _mm256_castps256_ps128(v0_0);
      __m128 v0_1_lo = _mm256_castps256_ps128(v0_1);
      __m128 v0_2_lo = _mm256_castps256_ps128(v0_2);
      __m128 v0_3_lo = _mm256_castps256_ps128(v0_3);
      __m128 v0_4_lo = _mm256_castps256_ps128(v0_4);
      __m128 v0_5_lo = _mm256_castps256_ps128(v0_5);
      __m128 v0_6_lo = _mm256_castps256_ps128(v0_6);
      __m128 v0_7_lo = _mm256_castps256_ps128(v0_7);
      // Store leftover columns by halving widths (4, then 2, then 1 element
      // per row), using the same backwards fall-through switch each time.
      if (bh & 4) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_storeu_ps(oN, v0_7_lo);
            v0_7_lo = _mm256_extractf128_ps(v0_7, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_storeu_ps(oN, v0_6_lo);
            v0_6_lo = _mm256_extractf128_ps(v0_6, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_storeu_ps(oN, v0_5_lo);
            v0_5_lo = _mm256_extractf128_ps(v0_5, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_storeu_ps(oN, v0_4_lo);
            v0_4_lo = _mm256_extractf128_ps(v0_4, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_storeu_ps(oN, v0_3_lo);
            v0_3_lo = _mm256_extractf128_ps(v0_3, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_storeu_ps(oN, v0_2_lo);
            v0_2_lo = _mm256_extractf128_ps(v0_2, 1);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_storeu_ps(oN, v0_1_lo);
            v0_1_lo = _mm256_extractf128_ps(v0_1, 1);
            /* fallthrough */
          case 0:
            _mm_storeu_ps(o, v0_0_lo);
            v0_0_lo = _mm256_extractf128_ps(v0_0, 1);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 4;
      }
      if (bh & 2) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_storel_pi((__m64*) oN, v0_7_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_storel_pi((__m64*) oN, v0_6_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_storel_pi((__m64*) oN, v0_5_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_storel_pi((__m64*) oN, v0_4_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_storel_pi((__m64*) oN, v0_3_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_storel_pi((__m64*) oN, v0_2_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_storel_pi((__m64*) oN, v0_1_lo);
            /* fallthrough */
          case 0:
            _mm_storel_pi((__m64*) o, v0_0_lo);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 2;
        // Shift the upper 64 bits down so the bh&1 step stores element 2.
        v0_0_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_0_lo), _mm_castps_pd(v0_0_lo)));
        v0_1_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_1_lo), _mm_castps_pd(v0_1_lo)));
        v0_2_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_2_lo), _mm_castps_pd(v0_2_lo)));
        v0_3_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_3_lo), _mm_castps_pd(v0_3_lo)));
        v0_4_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_4_lo), _mm_castps_pd(v0_4_lo)));
        v0_5_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_5_lo), _mm_castps_pd(v0_5_lo)));
        v0_6_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_6_lo), _mm_castps_pd(v0_6_lo)));
        v0_7_lo = _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(v0_7_lo), _mm_castps_pd(v0_7_lo)));
      }
      if (bh & 1) {
        float* oN = (float*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_store_ss(oN, v0_7_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 6:
            _mm_store_ss(oN, v0_6_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 5:
            _mm_store_ss(oN, v0_5_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 4:
            _mm_store_ss(oN, v0_4_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 3:
            _mm_store_ss(oN, v0_3_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 2:
            _mm_store_ss(oN, v0_2_lo);
            oN = (float*) ((uintptr_t) oN + minus_output_stride);
            /* fallthrough */
          case 1:
            _mm_store_ss(oN, v0_1_lo);
            /* fallthrough */
          case 0:
            _mm_store_ss(o, v0_0_lo);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    // Advance to the next 8-wide column of tiles.
    i0 = (const float*) ((uintptr_t) i0 + input_reset);
    o = (float*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 13,066
| 40.351266
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-unpool/x32-unpool-neon.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/unpool.h>
// Max-unpooling scatter kernel (NEON): first floods every output pixel with
// the `fill` constant, then scatters each input element to the output pixel
// selected by its index.  Assumes kernel_elements >= 1 and channels >= 1.
void xnn_x32_unpool_ukernel__neon(
    size_t kernel_elements,
    size_t channels,
    uint32_t fill,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output)
{
  // Stage 1: splat the fill constant across all kernel_elements outputs,
  // 4 lanes at a time with 2/1-element tails.
  const uint32x4_t vfill = vdupq_n_u32(fill);
  uint32_t** out_cursor = output;
  size_t k = kernel_elements;
  do {
    uint32_t* dst = *out_cursor++;
    size_t remaining = channels;
    while (remaining >= 4) {
      vst1q_u32(dst, vfill);
      dst += 4;
      remaining -= 4;
    }
    if (remaining & 2) {
      vst1_u32(dst, vget_low_u32(vfill));
      dst += 2;
    }
    if (remaining & 1) {
      vst1q_lane_u32(dst, vfill, 0);
    }
  } while (--k != 0);

  // Stage 2: channel-by-channel scatter of the pooled values into the
  // output buffer named by the corresponding index.
  size_t offset = 0;
  size_t c = channels;
  do {
    const uint32_t slot = *index++;
    *((uint32_t*) ((uintptr_t) output[slot] + offset)) = *input++;
    offset += sizeof(uint32_t);
  } while (--c != 0);
}
| 1,091
| 21.75
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-unpool/x32-unpool-scalar.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unpool.h>
// Max-unpooling scatter kernel (scalar): first floods every output pixel with
// the `fill` constant, then scatters each input element to the output pixel
// selected by its index.  Assumes kernel_elements >= 1 and channels >= 1.
void xnn_x32_unpool_ukernel__scalar(
    size_t kernel_elements,
    size_t channels,
    uint32_t fill,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output)
{
  // Stage 1: splat the fill constant across all kernel_elements outputs.
  uint32_t** out_cursor = output;
  size_t k = kernel_elements;
  do {
    uint32_t* dst = *out_cursor++;
    for (size_t c = 0; c < channels; c++) {
      dst[c] = fill;
    }
  } while (--k != 0);

  // Stage 2: channel-by-channel scatter of the pooled values into the
  // output buffer named by the corresponding index.
  size_t c = 0;
  do {
    output[index[c]][c] = input[c];
  } while (++c != channels);
}
| 841
| 21.756757
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-unpool/x32-unpool-sse2.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unpool.h>
// Max-unpooling scatter kernel (SSE2): first floods every output pixel with
// the `fill` constant, then scatters each input element to the output pixel
// selected by its index.  Assumes kernel_elements >= 1 and channels >= 1.
void xnn_x32_unpool_ukernel__sse2(
    size_t kernel_elements,
    size_t channels,
    uint32_t fill,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output)
{
  // Stage 1: splat the fill constant across all kernel_elements outputs,
  // 4 lanes at a time with 2/1-element tails.
  const __m128i vfill = _mm_set1_epi32((int) fill);
  uint32_t** out_cursor = output;
  size_t k = kernel_elements;
  do {
    uint32_t* dst = *out_cursor++;
    size_t remaining = channels;
    while (remaining >= 4) {
      _mm_storeu_si128((__m128i*) dst, vfill);
      dst += 4;
      remaining -= 4;
    }
    if (remaining & 2) {
      _mm_storel_epi64((__m128i*) dst, vfill);
      dst += 2;
    }
    if (remaining & 1) {
      *((int*) dst) = _mm_cvtsi128_si32(vfill);
    }
  } while (--k != 0);

  // Stage 2: channel-by-channel scatter of the pooled values into the
  // output buffer named by the corresponding index.
  size_t offset = 0;
  size_t c = channels;
  do {
    const uint32_t slot = *index++;
    *((uint32_t*) ((uintptr_t) output[slot] + offset)) = *input++;
    offset += sizeof(uint32_t);
  } while (--c != 0);
}
| 1,146
| 21.94
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-unpool/x32-unpool-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/unpool.h>
// Max-unpooling scatter kernel (WAsm SIMD): first floods every output pixel
// with the `fill` constant, then scatters each input element to the output
// pixel selected by its index.  Assumes kernel_elements >= 1 and channels >= 1.
void xnn_x32_unpool_ukernel__wasmsimd(
    size_t kernel_elements,
    size_t channels,
    uint32_t fill,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output)
{
  // Stage 1: splat the fill constant across all kernel_elements outputs,
  // 4 lanes at a time with 2/1-element tails.
  const v128_t vfill = wasm_i32x4_splat(fill);
  uint32_t** out_cursor = output;
  size_t k = kernel_elements;
  do {
    uint32_t* dst = *out_cursor++;
    size_t remaining = channels;
    while (remaining >= 4) {
      wasm_v128_store(dst, vfill);
      dst += 4;
      remaining -= 4;
    }
    if (remaining & 2) {
      wasm_v128_store64_lane(dst, vfill, 0);
      dst += 2;
    }
    if (remaining & 1) {
      wasm_v128_store32_lane(dst, vfill, 0);
    }
  } while (--k != 0);

  // Stage 2: channel-by-channel scatter of the pooled values into the
  // output buffer named by the corresponding index.
  size_t offset = 0;
  size_t c = channels;
  do {
    const uint32_t slot = *index++;
    *((uint32_t*) ((uintptr_t) output[slot] + offset)) = *input++;
    offset += sizeof(uint32_t);
  } while (--c != 0);
}
| 1,137
| 21.76
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-2c1s1r-gemm-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Writes float zeros into the packed-weights buffer for every group —
// presumably the bias slots of packed GEMM weights ("zerob"); confirm
// against the packing layout.  Channels are walked in tiles of 2 with a
// 1-channel subtile remainder; strides position each tile in the buffer.
void xnn_x32_zerob_gemm_ukernel_2c1s1r__scalar_float(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  float* out = (float*) packed_weights;
  const float vzero = 0;
  size_t g = groups;
  do {
    // Full channel tiles of 2.
    size_t remaining = channels;
    while (remaining >= 2) {
      unaligned_indexed_store_f32(out, 0, vzero);
      unaligned_indexed_store_f32(out, 1, vzero);
      out = (float*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 2;
    }
    // Single-channel subtile remainder.
    if (remaining != 0) {
      unaligned_indexed_store_f32(out, 0, vzero);
      out = (float*) ((uintptr_t) out + channel_subtile_stride);
    }
  } while (--g != 0);
}
| 1,241
| 23.84
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-2c1s1r-gemm-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Integer twin of the 2c1s1r float zerob kernel: writes uint32 zeros into
// the packed-weights buffer for every group — presumably the bias slots of
// packed GEMM weights ("zerob"); confirm against the packing layout.
// Channels are walked in tiles of 2 with a 1-channel subtile remainder.
void xnn_x32_zerob_gemm_ukernel_2c1s1r__scalar_int(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  uint32_t* out = (uint32_t*) packed_weights;
  const uint32_t vzero = 0;
  size_t g = groups;
  do {
    // Full channel tiles of 2.
    size_t remaining = channels;
    while (remaining >= 2) {
      unaligned_indexed_store_u32(out, 0, vzero);
      unaligned_indexed_store_u32(out, 1, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 2;
    }
    // Single-channel subtile remainder.
    if (remaining != 0) {
      unaligned_indexed_store_u32(out, 0, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_subtile_stride);
    }
  } while (--g != 0);
}
| 1,254
| 24.1
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-2c2s1r-gemm-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Writes float zeros into the packed-weights buffer for every group —
// presumably the bias slots of packed GEMM weights ("zerob"); confirm
// against the packing layout.  2c2s1r variant: channel tiles of 2, with
// the (unlikely) odd-channel remainder handled by the subtile stride.
void xnn_x32_zerob_gemm_ukernel_2c2s1r__scalar_float(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  float* out = (float*) packed_weights;
  const float vzero = 0;
  size_t g = groups;
  do {
    // Full channel tiles of 2.
    size_t remaining = channels;
    while (remaining >= 2) {
      unaligned_indexed_store_f32(out, 0, vzero);
      unaligned_indexed_store_f32(out, 1, vzero);
      out = (float*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 2;
    }
    // Odd-channel remainder.
    if XNN_UNLIKELY(remaining != 0) {
      unaligned_indexed_store_f32(out, 0, vzero);
      out = (float*) ((uintptr_t) out + channel_subtile_stride);
    }
  } while (--g != 0);
}
| 1,210
| 24.229167
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-2c2s1r-gemm-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Integer twin of the 2c2s1r float zerob kernel: writes `channels` zero
// uint32 values for each of `groups` groups, stepping by
// channel_tile_stride per full 2-channel tile and by channel_subtile_stride
// for the optional single-channel remainder.
void xnn_x32_zerob_gemm_ukernel_2c2s1r__scalar_int(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  const uint32_t vzero = 0;
  uint32_t* out = packed_weights;
  size_t g = groups;
  while (g-- != 0) {
    // Full tiles of 2 channels.
    size_t remaining = channels;
    while (remaining >= 2) {
      unaligned_indexed_store_u32(out, 0, vzero);
      unaligned_indexed_store_u32(out, 1, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 2;
    }
    // At most one leftover channel goes into a subtile.
    if (remaining != 0) {
      unaligned_indexed_store_u32(out, 0, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_subtile_stride);
    }
  }
}
| 1,223
| 24.5
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-4c1s1r-gemm-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Zero-fills the packed buffer for `groups` groups of `channels` float
// values. Full tiles of 4 channels step by channel_tile_stride; each
// leftover channel (subtile width 1) steps by channel_subtile_stride.
void xnn_x32_zerob_gemm_ukernel_4c1s1r__scalar_float(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  const float vzero = 0;
  float* out = (float*) packed_weights;
  size_t g = groups;
  while (g-- != 0) {
    // Full tiles of 4 channels.
    size_t remaining = channels;
    while (remaining >= 4) {
      unaligned_indexed_store_f32(out, 0, vzero);
      unaligned_indexed_store_f32(out, 1, vzero);
      unaligned_indexed_store_f32(out, 2, vzero);
      unaligned_indexed_store_f32(out, 3, vzero);
      out = (float*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 4;
    }
    // Leftover channels, one per subtile.
    while (remaining != 0) {
      unaligned_indexed_store_f32(out, 0, vzero);
      out = (float*) ((uintptr_t) out + channel_subtile_stride);
      remaining -= 1;
    }
  }
}
| 1,348
| 24.942308
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-4c1s1r-gemm-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Integer twin of the 4c1s1r float zerob kernel: zero-fills `channels`
// uint32 values per group, 4 channels per tile and 1 per subtile.
void xnn_x32_zerob_gemm_ukernel_4c1s1r__scalar_int(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  const uint32_t vzero = 0;
  uint32_t* out = packed_weights;
  size_t g = groups;
  while (g-- != 0) {
    // Full tiles of 4 channels.
    size_t remaining = channels;
    while (remaining >= 4) {
      unaligned_indexed_store_u32(out, 0, vzero);
      unaligned_indexed_store_u32(out, 1, vzero);
      unaligned_indexed_store_u32(out, 2, vzero);
      unaligned_indexed_store_u32(out, 3, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 4;
    }
    // Leftover channels, one per subtile.
    while (remaining != 0) {
      unaligned_indexed_store_u32(out, 0, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_subtile_stride);
      remaining -= 1;
    }
  }
}
| 1,361
| 25.192308
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-4c4s1r-gemm-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Zero-fills the packed buffer for `groups` groups of `channels` float
// values. Full tiles of 4 channels step by channel_tile_stride; a
// remainder of 1..3 channels is written into a single subtile which then
// steps by channel_subtile_stride from the subtile's start.
void xnn_x32_zerob_gemm_ukernel_4c4s1r__scalar_float(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  const float vzero = 0;
  float* out = (float*) packed_weights;
  size_t g = groups;
  while (g-- != 0) {
    // Full tiles of 4 channels.
    size_t remaining = channels;
    while (remaining >= 4) {
      unaligned_indexed_store_f32(out, 0, vzero);
      unaligned_indexed_store_f32(out, 1, vzero);
      unaligned_indexed_store_f32(out, 2, vzero);
      unaligned_indexed_store_f32(out, 3, vzero);
      out = (float*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 4;
    }
    if (remaining != 0) {
      // Remainder of 1..3 channels in one subtile; zeroes land at
      // indices 0..remaining-1, matching the original 2-then-1 split.
      float* subtile = out;
      for (size_t i = 0; i < remaining; i++) {
        unaligned_indexed_store_f32(subtile, i, vzero);
      }
      // The stride is applied from the subtile's start, not its end.
      out = (float*) ((uintptr_t) subtile + channel_subtile_stride);
    }
  }
}
| 1,560
| 24.590164
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zerob/gen/x32-zerob-4c4s1r-gemm-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
// Integer twin of the 4c4s1r float zerob kernel: 4-channel tiles stepping
// by channel_tile_stride, plus a 1..3-channel remainder written into one
// subtile that steps by channel_subtile_stride from its start.
void xnn_x32_zerob_gemm_ukernel_4c4s1r__scalar_int(
    size_t groups,
    size_t channels,
    uint32_t* packed_weights,
    size_t channel_tile_stride,
    size_t channel_subtile_stride,
    const union xnn_x32_packb_params* params)
{
  assert(groups != 0);
  assert(channels != 0);
  assert(packed_weights != NULL);

  const uint32_t vzero = 0;
  uint32_t* out = packed_weights;
  size_t g = groups;
  while (g-- != 0) {
    // Full tiles of 4 channels.
    size_t remaining = channels;
    while (remaining >= 4) {
      unaligned_indexed_store_u32(out, 0, vzero);
      unaligned_indexed_store_u32(out, 1, vzero);
      unaligned_indexed_store_u32(out, 2, vzero);
      unaligned_indexed_store_u32(out, 3, vzero);
      out = (uint32_t*) ((uintptr_t) out + channel_tile_stride);
      remaining -= 4;
    }
    if (remaining != 0) {
      // Remainder of 1..3 channels in one subtile; zeroes land at
      // indices 0..remaining-1, matching the original 2-then-1 split.
      uint32_t* subtile = out;
      for (size_t i = 0; i < remaining; i++) {
        unaligned_indexed_store_u32(subtile, i, vzero);
      }
      // The stride is applied from the subtile's start, not its end.
      out = (uint32_t*) ((uintptr_t) subtile + channel_subtile_stride);
    }
  }
}
| 1,576
| 24.852459
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x2-neon.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/zip.h>
// Interleaves two 32-bit streams into one: out = { x0, y0, x1, y1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); the two
// streams are laid out back to back in `input`.
void xnn_x32_zip_x2_ukernel__neon(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const uint32_t* x = input;
  // Second stream begins n bytes after the first.
  const uint32_t* y = (const uint32_t*) ((uintptr_t) x + n);
  uint32_t* o = output;

  // Main loop: 4 elements per stream per iteration; vst2q performs the
  // 2-way interleave on store.
  while (n >= 16) {
    uint32x4x2_t vxy;
    vxy.val[0] = vld1q_u32(x); x += 4;
    vxy.val[1] = vld1q_u32(y); y += 4;
    vst2q_u32(o, vxy); o += 8;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail, still via an interleaving store.
    if (n & 8) {
      uint32x2x2_t vxy;
      vxy.val[0] = vld1_u32(x); x += 2;
      vxy.val[1] = vld1_u32(y); y += 2;
      vst2_u32(o, vxy); o += 4;
    }
    // 1-element tail: x in lane 0, y in lane 1, one 64-bit store.
    if (n & 4) {
      uint32x2_t vxy = vld1_dup_u32(x);
      vxy = vld1_lane_u32(y, vxy, 1);
      vst1_u32(o, vxy);
    }
  }
}
| 978
| 20.282609
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x2-sse2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/zip.h>
// Interleaves two 32-bit streams into one: out = { x0, y0, x1, y1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); the two
// streams are laid out back to back in `input`.
void xnn_x32_zip_x2_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const uint32_t* x = input;
  // Second stream begins n bytes after the first.
  const uint32_t* y = (const uint32_t*) ((uintptr_t) x + n);
  uint32_t* o = output;

  // Main loop: 4 elements per stream; unpacklo/unpackhi produce the
  // interleaved low and high halves.
  while (n >= 16) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 4;
    const __m128i vy = _mm_loadu_si128((const __m128i*) y);
    y += 4;
    const __m128i vxy_lo = _mm_unpacklo_epi32(vx, vy);
    const __m128i vxy_hi = _mm_unpackhi_epi32(vx, vy);
    _mm_storeu_si128((__m128i*) o, vxy_lo);
    _mm_storeu_si128((__m128i*) (o + 4), vxy_hi);
    o += 8;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail via 64-bit loads.
    if (n & 8) {
      const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
      x += 2;
      const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
      y += 2;
      const __m128i vxy = _mm_unpacklo_epi32(vx, vy);
      _mm_storeu_si128((__m128i*) o, vxy);
      o += 4;
    }
    // 1-element tail handled with scalar copies.
    if (n & 4) {
      const uint32_t vx = *x;
      const uint32_t vy = *y;
      o[0] = vx;
      o[1] = vy;
    }
  }
}
| 1,327
| 23.145455
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x2-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/zip.h>
// Interleaves two 32-bit streams into one: out = { x0, y0, x1, y1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4). Data is
// handled through float/double pointers purely as 32/64-bit moves.
void xnn_x32_zip_x2_ukernel__wasmsimd(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % sizeof(uint32_t) == 0);

  const float* x = (const float*) input;
  // Second stream begins n bytes after the first.
  const float* y = (const float*) ((uintptr_t) x + n);
  float* o = (float*) output;

  // Main loop: 4 elements per stream; the two shuffles interleave the low
  // and high halves of the lane pairs.
  while (n >= 4 * sizeof(uint32_t)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;
    const v128_t vy = wasm_v128_load(y);
    y += 4;
    const v128_t vxy_lo = wasm_v32x4_shuffle(vx, vy, 0, 4, 1, 5);
    const v128_t vxy_hi = wasm_v32x4_shuffle(vx, vy, 2, 6, 3, 7);
    wasm_v128_store(o, vxy_lo);
    wasm_v128_store(o + 4, vxy_hi);
    o += 8;
    n -= 4 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail: 64-bit moves packed into one vector, then reordered.
    if (n & (2 * sizeof(uint32_t))) {
      const double vx = *((const double*) x);
      x += 2;
      const double vy = *((const double*) y);
      y += 2;
      const v128_t vxy = wasm_f64x2_make(vx, vy);
      wasm_v128_store(o, wasm_v32x4_shuffle(vxy, vxy, 0, 2, 1, 3));
      o += 4;
    }
    // 1-element tail handled with scalar copies.
    if (n & (1 * sizeof(uint32_t))) {
      const float vx = *x;
      const float vy = *y;
      o[0] = vx;
      o[1] = vy;
    }
  }
}
| 1,380
| 24.109091
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x3-neon.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/zip.h>
// Interleaves three 32-bit streams: out = { x0, y0, z0, x1, y1, z1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); streams
// are laid out back to back in `input`.
void xnn_x32_zip_x3_ukernel__neon(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const uint32_t* x = input;
  const uint32_t* y = (const uint32_t*) ((uintptr_t) x + n);
  const uint32_t* z = (const uint32_t*) ((uintptr_t) y + n);
  uint32_t* o = output;

  // Main loop: 4 elements per stream; vst3q performs the 3-way interleave.
  while (n >= 16) {
    uint32x4x3_t vxyz;
    vxyz.val[0] = vld1q_u32(x); x += 4;
    vxyz.val[1] = vld1q_u32(y); y += 4;
    vxyz.val[2] = vld1q_u32(z); z += 4;
    vst3q_u32(o, vxyz); o += 12;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail.
    if (n & 8) {
      uint32x2x3_t vxyz;
      vxyz.val[0] = vld1_u32(x); x += 2;
      vxyz.val[1] = vld1_u32(y); y += 2;
      vxyz.val[2] = vld1_u32(z); z += 2;
      vst3_u32(o, vxyz); o += 6;
    }
    // 1-element tail: a 64-bit {x, y} store followed by a single-lane z
    // store.
    if (n & 4) {
      uint32x2_t vxy = vld1_dup_u32(x);
      const uint32x2_t vz = vld1_dup_u32(z);
      vxy = vld1_lane_u32(y, vxy, 1);
      vst1_u32(o, vxy); o += 2;
      vst1_lane_u32(o, vz, 0);
    }
  }
}
| 1,213
| 22.803922
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x3-sse2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/zip.h>
// Interleaves three 32-bit streams: out = { x0, y0, z0, x1, y1, z1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4). Elements
// are moved through __m128 float registers; the bit patterns are copied
// untouched. The per-line comments show lane contents, highest lane first.
void xnn_x32_zip_x3_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const float* x = (const float*) input;
  const float* y = (const float*) ((uintptr_t) x + n);
  const float* z = (const float*) ((uintptr_t) y + n);
  float* o = (float*) output;

  // Main loop: 4 elements per stream become 3 interleaved output vectors.
  while (n >= 16) {
    // vx = ( x3, x2, x1, x0 )
    const __m128 vx = _mm_loadu_ps(x);
    x += 4;
    // vy = ( y3, y2, y1, y0 )
    const __m128 vy = _mm_loadu_ps(y);
    y += 4;
    // vz = ( z3, z2, z1, z0 )
    const __m128 vz = _mm_loadu_ps(z);
    z += 4;
    // vxy = ( y2, y0, x2, x0 )
    const __m128 vxy = _mm_shuffle_ps(vx, vy, _MM_SHUFFLE(2, 0, 2, 0));
    // vyz = ( z3, z1, y3, y1 )
    const __m128 vyz = _mm_shuffle_ps(vy, vz, _MM_SHUFFLE(3, 1, 3, 1));
    // vzx = ( x3, x1, z2, z0 )
    const __m128 vzx = _mm_shuffle_ps(vz, vx, _MM_SHUFFLE(3, 1, 2, 0));
    // vxyz0 = ( x1, z0, y0, x0 )
    const __m128 vxyz0 = _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(2, 0, 2, 0));
    // vxyz1 = ( y2, x2, z1, y1 )
    const __m128 vxyz1 = _mm_shuffle_ps(vyz, vxy, _MM_SHUFFLE(3, 1, 2, 0));
    // vxyz2 = ( z3, y3, x3, z2 )
    const __m128 vxyz2 = _mm_shuffle_ps(vzx, vyz, _MM_SHUFFLE(3, 1, 3, 1));
    _mm_storeu_ps(o, vxyz0);
    _mm_storeu_ps(o + 4, vxyz1);
    _mm_storeu_ps(o + 8, vxyz2);
    o += 12;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail: 6 interleaved outputs via one full store plus a
    // 64-bit high-half store.
    if (n & 8) {
      // vx = ( -, -, x1, x0 )
      const __m128 vx = _mm_castpd_ps(_mm_load_sd((const double*) x));
      x += 2;
      // vy = ( -, -, y1, y0 )
      const __m128 vy = _mm_castpd_ps(_mm_load_sd((const double*) y));
      y += 2;
      // vz = ( -, -, z1, z0 )
      const __m128 vz = _mm_castpd_ps(_mm_load_sd((const double*) z));
      z += 2;
      // vxy = ( y1, x1, y0, x0 )
      const __m128 vxy = _mm_unpacklo_ps(vx, vy);
      // vzx = ( x1, z1, x0, z0 )
      const __m128 vzx = _mm_unpacklo_ps(vz, vx);
      // vyz = ( z1, y1, z0, y0 )
      const __m128 vyz = _mm_unpacklo_ps(vy, vz);
      _mm_storeu_ps(o, _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(3, 0, 1, 0)));
      _mm_storeh_pi((__m64*) (o + 4), vyz);
      o += 6;
    }
    // 1-element tail: three scalar moves.
    if (n & 4) {
      const __m128 vx = _mm_load_ss(x);
      const __m128 vy = _mm_load_ss(y);
      const __m128 vz = _mm_load_ss(z);
      _mm_store_ss(o, vx);
      _mm_store_ss(o + 1, vy);
      _mm_store_ss(o + 2, vz);
    }
  }
}
| 2,620
| 28.122222
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x3-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/zip.h>
// Interleaves three 32-bit streams: out = { x0, y0, z0, x1, y1, z1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4). The
// per-line comments show lane contents, highest lane first.
void xnn_x32_zip_x3_ukernel__wasmsimd(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % sizeof(uint32_t) == 0);

  const float* x = (const float*) input;
  const float* y = (const float*) ((uintptr_t) x + n);
  const float* z = (const float*) ((uintptr_t) y + n);
  float* o = (float*) output;

  // Main loop: 4 elements per stream become 3 interleaved output vectors.
  while (n >= 4 * sizeof(uint32_t)) {
    // vx = ( x3, x2, x1, x0 )
    const v128_t vx = wasm_v128_load(x);
    x += 4;
    // vy = ( y3, y2, y1, y0 )
    const v128_t vy = wasm_v128_load(y);
    y += 4;
    // vz = ( z3, z2, z1, z0 )
    const v128_t vz = wasm_v128_load(z);
    z += 4;
    // vxy = ( y2, y0, x2, x0 )
    const v128_t vxy = wasm_v32x4_shuffle(vx, vy, 0, 2, 4, 6);
    // vyz = ( z3, z1, y3, y1 )
    const v128_t vyz = wasm_v32x4_shuffle(vy, vz, 1, 3, 5, 7);
    // vzx = ( x3, x1, z2, z0 )
    const v128_t vzx = wasm_v32x4_shuffle(vz, vx, 0, 2, 5, 7);
    // vxyz0 = ( x1, z0, y0, x0 )
    const v128_t vxyz0 = wasm_v32x4_shuffle(vxy, vzx, 0, 2, 4, 6);
    // vxyz1 = ( y2, x2, z1, y1 )
    const v128_t vxyz1 = wasm_v32x4_shuffle(vyz, vxy, 0, 2, 5, 7);
    // vxyz2 = ( z3, y3, x3, z2 )
    const v128_t vxyz2 = wasm_v32x4_shuffle(vzx, vyz, 1, 3, 5, 7);
    wasm_v128_store(o, vxyz0);
    wasm_v128_store(o + 4, vxyz1);
    wasm_v128_store(o + 8, vxyz2);
    o += 12;
    n -= 4 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(n != 0) {
    // Tail: one scalar triple per remaining element.
    do {
      const float vx = *x++;
      const float vy = *y++;
      const float vz = *z++;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o += 3;
      n -= sizeof(uint32_t);
    } while (n != 0);
  }
}
| 1,858
| 25.557143
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x4-neon.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/zip.h>
// Interleaves four 32-bit streams: out = { x0, y0, z0, w0, x1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); streams
// are laid out back to back in `input`.
void xnn_x32_zip_x4_ukernel__neon(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const uint32_t* x = input;
  const uint32_t* y = (const uint32_t*) ((uintptr_t) x + n);
  const uint32_t* z = (const uint32_t*) ((uintptr_t) y + n);
  const uint32_t* w = (const uint32_t*) ((uintptr_t) z + n);
  uint32_t* o = output;

  // Main loop: 4 elements per stream; vst4q performs the 4-way interleave.
  while (n >= 16) {
    uint32x4x4_t vxyzw;
    vxyzw.val[0] = vld1q_u32(x); x += 4;
    vxyzw.val[1] = vld1q_u32(y); y += 4;
    vxyzw.val[2] = vld1q_u32(z); z += 4;
    vxyzw.val[3] = vld1q_u32(w); w += 4;
    vst4q_u32(o, vxyzw); o += 16;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail.
    if (n & 8) {
      uint32x2x4_t vxyzw;
      vxyzw.val[0] = vld1_u32(x); x += 2;
      vxyzw.val[1] = vld1_u32(y); y += 2;
      vxyzw.val[2] = vld1_u32(z); z += 2;
      vxyzw.val[3] = vld1_u32(w); w += 2;
      vst4_u32(o, vxyzw); o += 8;
    }
    // 1-element tail: gather one element per stream into lanes 0..3.
    if (n & 4) {
      uint32x4_t vxyzw = vld1q_dup_u32(x);
      vxyzw = vld1q_lane_u32(y, vxyzw, 1);
      vxyzw = vld1q_lane_u32(z, vxyzw, 2);
      vxyzw = vld1q_lane_u32(w, vxyzw, 3);
      vst1q_u32(o, vxyzw);
    }
  }
}
| 1,380
| 24.574074
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x4-scalar.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/zip.h>
// Interleaves four 32-bit streams: out = { x0, y0, z0, w0, x1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); the four
// streams are laid out back to back in `input`.
void xnn_x32_zip_x4_ukernel__scalar(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  // n is in bytes; each stream holds n / sizeof(uint32_t) elements.
  const size_t elements = n / 4;
  const uint32_t* x = input;
  const uint32_t* y = x + elements;
  const uint32_t* z = y + elements;
  const uint32_t* w = z + elements;
  for (size_t i = 0; i < elements; i++) {
    output[4 * i + 0] = x[i];
    output[4 * i + 1] = y[i];
    output[4 * i + 2] = z[i];
    output[4 * i + 3] = w[i];
  }
}
| 827
| 20.230769
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x4-sse2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/zip.h>
// Interleaves four 32-bit streams: out = { x0, y0, z0, w0, x1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); streams
// are laid out back to back in `input`.
void xnn_x32_zip_x4_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);

  const uint32_t* x = input;
  const uint32_t* y = (const uint32_t*) ((uintptr_t) x + n);
  const uint32_t* z = (const uint32_t*) ((uintptr_t) y + n);
  const uint32_t* w = (const uint32_t*) ((uintptr_t) z + n);
  uint32_t* o = output;

  // Main loop: 4 elements per stream; 32-bit unpacks pair x/y and z/w,
  // then 64-bit unpacks merge the pairs into full 4-way groups.
  while (n >= 16) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 4;
    const __m128i vy = _mm_loadu_si128((const __m128i*) y);
    y += 4;
    const __m128i vz = _mm_loadu_si128((const __m128i*) z);
    z += 4;
    const __m128i vw = _mm_loadu_si128((const __m128i*) w);
    w += 4;
    const __m128i vxy_lo = _mm_unpacklo_epi32(vx, vy);
    const __m128i vxy_hi = _mm_unpackhi_epi32(vx, vy);
    const __m128i vzw_lo = _mm_unpacklo_epi32(vz, vw);
    const __m128i vzw_hi = _mm_unpackhi_epi32(vz, vw);
    const __m128i vxyzw0 = _mm_unpacklo_epi64(vxy_lo, vzw_lo);
    const __m128i vxyzw1 = _mm_unpackhi_epi64(vxy_lo, vzw_lo);
    const __m128i vxyzw2 = _mm_unpacklo_epi64(vxy_hi, vzw_hi);
    const __m128i vxyzw3 = _mm_unpackhi_epi64(vxy_hi, vzw_hi);
    _mm_storeu_si128((__m128i*) o, vxyzw0);
    _mm_storeu_si128((__m128i*) (o + 4), vxyzw1);
    _mm_storeu_si128((__m128i*) (o + 8), vxyzw2);
    _mm_storeu_si128((__m128i*) (o + 12), vxyzw3);
    o += 16;
    n -= 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail: same unpack scheme on 64-bit loads.
    if (n & 8) {
      const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
      x += 2;
      const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
      y += 2;
      const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
      z += 2;
      const __m128i vw = _mm_loadl_epi64((const __m128i*) w);
      w += 2;
      const __m128i vxy = _mm_unpacklo_epi32(vx, vy);
      const __m128i vzw = _mm_unpacklo_epi32(vz, vw);
      const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);
      const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);
      _mm_storeu_si128((__m128i*) o, vxyzw_lo);
      _mm_storeu_si128((__m128i*) (o + 4), vxyzw_hi);
      o += 8;
    }
    // 1-element tail handled with scalar copies.
    if (n & 4) {
      const uint32_t vx = *x;
      const uint32_t vy = *y;
      const uint32_t vz = *z;
      const uint32_t vw = *w;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o[3] = vw;
    }
  }
}
| 2,544
| 28.252874
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-x4-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/zip.h>
// Interleaves four 32-bit streams: out = { x0, y0, z0, w0, x1, ... }.
// n is the size of EACH stream in bytes (non-zero multiple of 4); streams
// are laid out back to back in `input`.
void xnn_x32_zip_x4_ukernel__wasmsimd(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % sizeof(uint32_t) == 0);

  const float* x = (const float*) input;
  const float* y = (const float*) ((uintptr_t) x + n);
  const float* z = (const float*) ((uintptr_t) y + n);
  const float* w = (const float*) ((uintptr_t) z + n);
  float* o = (float*) output;

  // Main loop: 4 elements per stream; the first shuffle level pairs x/y
  // and z/w, the second merges the pairs into full 4-way groups.
  while (n >= 4 * sizeof(uint32_t)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;
    const v128_t vy = wasm_v128_load(y);
    y += 4;
    const v128_t vz = wasm_v128_load(z);
    z += 4;
    const v128_t vw = wasm_v128_load(w);
    w += 4;
    const v128_t vxy_lo = wasm_v32x4_shuffle(vx, vy, 0, 4, 1, 5);
    const v128_t vxy_hi = wasm_v32x4_shuffle(vx, vy, 2, 6, 3, 7);
    const v128_t vzw_lo = wasm_v32x4_shuffle(vz, vw, 0, 4, 1, 5);
    const v128_t vzw_hi = wasm_v32x4_shuffle(vz, vw, 2, 6, 3, 7);
    const v128_t vxyzw0 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 0, 1, 4, 5);
    const v128_t vxyzw1 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 2, 3, 6, 7);
    const v128_t vxyzw2 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 0, 1, 4, 5);
    const v128_t vxyzw3 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 2, 3, 6, 7);
    wasm_v128_store(o, vxyzw0);
    wasm_v128_store(o + 4, vxyzw1);
    wasm_v128_store(o + 8, vxyzw2);
    wasm_v128_store(o + 12, vxyzw3);
    o += 16;
    n -= 4 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(n != 0) {
    // 2-element tail: 64-bit moves packed into vectors, then reshuffled.
    if (n & (2 * sizeof(uint32_t))) {
      const double vx = *((const double*) x);
      x += 2;
      const double vy = *((const double*) y);
      y += 2;
      const double vz = *((const double*) z);
      z += 2;
      const double vw = *((const double*) w);
      w += 2;
      const v128_t vxy = wasm_f64x2_make(vx, vy);
      const v128_t vzw = wasm_f64x2_make(vz, vw);
      const v128_t vxyzw_lo = wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6);
      const v128_t vxyzw_hi = wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7);
      wasm_v128_store(o, vxyzw_lo);
      wasm_v128_store(o + 4, vxyzw_hi);
      o += 8;
    }
    // 1-element tail handled with scalar copies.
    if (n & (1 * sizeof(uint32_t))) {
      const float vx = *x;
      const float vy = *y;
      const float vz = *z;
      const float vw = *w;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o[3] = vw;
    }
  }
}
| 2,514
| 27.908046
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-xm-neon.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/zip.h>
// Interleaves m (>= 4) 32-bit streams of n bytes each, 4 streams at a
// time. For each group of 4 streams, 4 consecutive elements are written,
// then the output jumps by group_increment (m elements) to the next
// interleave position. When m is not a multiple of 4, the final group is
// clamped (w -> last_input, output -> last_output) so it overlaps the
// previous one, re-writing some outputs instead of reading past the end.
void xnn_x32_zip_xm_ukernel__neon(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);
  assert(m >= 4);

  const uint32_t* w = input;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 16 - m * n;
  // Start of the last input stream / last 4-element output column group.
  const uint32_t* last_input = (const uint32_t*) ((uintptr_t) input + n * (m - 1));
  uint32_t* last_output = (uint32_t*) ((uintptr_t) output + (m * 4 - 16));

  for (size_t i = 0; i < m; i += 4) {
    // w tracks the 4th stream of the group; x, y, z are derived from it.
    w = (const uint32_t*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const uint32_t* z = (const uint32_t*) ((uintptr_t) w - n);
    const uint32_t* y = (const uint32_t*) ((uintptr_t) z - n);
    const uint32_t* x = (const uint32_t*) ((uintptr_t) y - n);
    size_t k = n;
    // Main loop: 4 elements per stream; vzipq interleaves x/y and z/w,
    // halves are scattered to 4 output rows.
    while (k >= 16) {
      const uint32x4_t vx = vld1q_u32(x); x += 4;
      const uint32x4_t vy = vld1q_u32(y); y += 4;
      const uint32x4_t vz = vld1q_u32(z); z += 4;
      const uint32x4_t vw = vld1q_u32(w); w += 4;
      const uint32x4x2_t vxy = vzipq_u32(vx, vy);
      const uint32x4x2_t vzw = vzipq_u32(vz, vw);
      vst1_u32(output, vget_low_u32(vxy.val[0]));
      vst1_u32(output + 2, vget_low_u32(vzw.val[0]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      vst1_u32(output, vget_high_u32(vxy.val[0]));
      vst1_u32(output + 2, vget_high_u32(vzw.val[0]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      vst1_u32(output, vget_low_u32(vxy.val[1]));
      vst1_u32(output + 2, vget_low_u32(vzw.val[1]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      vst1_u32(output, vget_high_u32(vxy.val[1]));
      vst1_u32(output + 2, vget_high_u32(vzw.val[1]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      k -= 16;
    }
    if XNN_UNLIKELY(k != 0) {
      // 2-element tail.
      if (k & 8) {
        const uint32x2_t vx = vld1_u32(x); x += 2;
        const uint32x2_t vy = vld1_u32(y); y += 2;
        const uint32x2_t vz = vld1_u32(z); z += 2;
        const uint32x2_t vw = vld1_u32(w); w += 2;
        const uint32x2x2_t vxy = vzip_u32(vx, vy);
        const uint32x2x2_t vzw = vzip_u32(vz, vw);
        vst1_u32(output, vxy.val[0]);
        vst1_u32(output + 2, vzw.val[0]);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
        vst1_u32(output, vxy.val[1]);
        vst1_u32(output + 2, vzw.val[1]);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
      // 1-element tail; only w is advanced because it is the running
      // pointer carried into the next group iteration.
      if (k & 4) {
        const uint32x2_t vx = vld1_dup_u32(x);
        const uint32x2_t vz = vld1_dup_u32(z);
        const uint32x2_t vxy = vld1_lane_u32(y, vx, 1);
        const uint32x2_t vzw = vld1_lane_u32(w, vz, 1); w += 1;
        vst1_u32(output, vxy);
        vst1_u32(output + 2, vzw);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
    }
    // Rewind to the next group of 4 output columns, clamped for the
    // overlapping final group.
    output = (uint32_t*) ((uintptr_t) output + output_increment);
    if (output > last_output) {
      output = last_output;
    }
  }
}
| 3,308
| 31.441176
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-xm-sse2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/zip.h>
// Interleaves m (>= 4) 32-bit streams of n bytes each, 4 streams at a
// time. For each group of 4 streams, 4 consecutive elements are written,
// then the output jumps by group_increment (m elements) to the next
// interleave position. When m is not a multiple of 4, the final group is
// clamped (w -> last_input, output -> last_output) so it overlaps the
// previous one, re-writing some outputs instead of reading past the end.
void xnn_x32_zip_xm_ukernel__sse2(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);
  assert(m >= 4);

  const uint32_t* w = input;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 16 - m * n;
  // Start of the last input stream / last 4-element output column group.
  const uint32_t* last_input = (const uint32_t*) ((uintptr_t) input + n * (m - 1));
  uint32_t* last_output = (uint32_t*) ((uintptr_t) output + (m * 4 - 16));

  for (size_t i = 0; i < m; i += 4) {
    // w tracks the 4th stream of the group; x, y, z are derived from it.
    w = (const uint32_t*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const uint32_t* z = (const uint32_t*) ((uintptr_t) w - n);
    const uint32_t* y = (const uint32_t*) ((uintptr_t) z - n);
    const uint32_t* x = (const uint32_t*) ((uintptr_t) y - n);
    size_t k = n;
    // Main loop: 4 elements per stream; 32-bit unpacks pair x/y and z/w,
    // 64-bit unpacks merge pairs; each result goes to a distinct output
    // row.
    while (k >= 16) {
      const __m128i vx = _mm_loadu_si128((const __m128i*) x);
      x += 4;
      const __m128i vy = _mm_loadu_si128((const __m128i*) y);
      y += 4;
      const __m128i vz = _mm_loadu_si128((const __m128i*) z);
      z += 4;
      const __m128i vw = _mm_loadu_si128((const __m128i*) w);
      w += 4;
      const __m128i vxy_lo = _mm_unpacklo_epi32(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi32(vx, vy);
      const __m128i vzw_lo = _mm_unpacklo_epi32(vz, vw);
      const __m128i vzw_hi = _mm_unpackhi_epi32(vz, vw);
      const __m128i vxyzw0 = _mm_unpacklo_epi64(vxy_lo, vzw_lo);
      const __m128i vxyzw1 = _mm_unpackhi_epi64(vxy_lo, vzw_lo);
      const __m128i vxyzw2 = _mm_unpacklo_epi64(vxy_hi, vzw_hi);
      const __m128i vxyzw3 = _mm_unpackhi_epi64(vxy_hi, vzw_hi);
      _mm_storeu_si128((__m128i*) output, vxyzw0);
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      _mm_storeu_si128((__m128i*) output, vxyzw1);
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      _mm_storeu_si128((__m128i*) output, vxyzw2);
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      _mm_storeu_si128((__m128i*) output, vxyzw3);
      output = (uint32_t*) ((uintptr_t) output + group_increment);
      k -= 16;
    }
    if XNN_UNLIKELY(k != 0) {
      // 2-element tail: same unpack scheme on 64-bit loads.
      if (k & 8) {
        const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
        x += 2;
        const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
        y += 2;
        const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
        z += 2;
        const __m128i vw = _mm_loadl_epi64((const __m128i*) w);
        w += 2;
        const __m128i vxy = _mm_unpacklo_epi32(vx, vy);
        const __m128i vzw = _mm_unpacklo_epi32(vz, vw);
        const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);
        const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);
        _mm_storeu_si128((__m128i*) output, vxyzw_lo);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
        _mm_storeu_si128((__m128i*) output, vxyzw_hi);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
      // 1-element tail; only w is advanced because it is the running
      // pointer carried into the next group iteration.
      if (k & 4) {
        const uint32_t vx = *x;
        const uint32_t vy = *y;
        const uint32_t vz = *z;
        const uint32_t vw = *w++;
        output[0] = vx;
        output[1] = vy;
        output[2] = vz;
        output[3] = vw;
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
    }
    // Rewind to the next group of 4 output columns, clamped for the
    // overlapping final group.
    output = (uint32_t*) ((uintptr_t) output + output_increment);
    if (output > last_output) {
      output = last_output;
    }
  }
}
| 3,735
| 31.206897
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-zip/x32-zip-xm-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/zip.h>
// Interleaves m (>= 4) 32-bit streams of n bytes each, 4 streams at a
// time. For each group of 4 streams, 4 consecutive elements are written,
// then the output jumps by group_increment (m elements) to the next
// interleave position. When m is not a multiple of 4, the final group is
// clamped (w -> last_input, o -> last_output) so it overlaps the previous
// one, re-writing some outputs instead of reading past the end.
void xnn_x32_zip_xm_ukernel__wasmsimd(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % sizeof(uint32_t) == 0);
  assert(m >= 4);

  const float* w = (const float*) input;
  float* o = (float*) output;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 4 * sizeof(uint32_t) - m * n;
  // Start of the last input stream / last 4-element output column group.
  const float* last_input = (const float*) ((uintptr_t) input + n * (m - 1));
  float* last_output = (float*) ((uintptr_t) output + (m * 4 - 4 * sizeof(uint32_t)));

  for (size_t i = 0; i < m; i += 4) {
    // w tracks the 4th stream of the group; x, y, z are derived from it.
    w = (const float*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const float* z = (const float*) ((uintptr_t) w - n);
    const float* y = (const float*) ((uintptr_t) z - n);
    const float* x = (const float*) ((uintptr_t) y - n);
    size_t k = n;
    // Main loop: 4 elements per stream; two shuffle levels build the
    // 4-way interleave, one result vector per output row.
    while (k >= 4 * sizeof(uint32_t)) {
      const v128_t vx = wasm_v128_load((const v128_t*) x);
      x += 4;
      const v128_t vy = wasm_v128_load((const v128_t*) y);
      y += 4;
      const v128_t vz = wasm_v128_load((const v128_t*) z);
      z += 4;
      const v128_t vw = wasm_v128_load((const v128_t*) w);
      w += 4;
      const v128_t vxy_lo = wasm_v32x4_shuffle(vx, vy, 0, 4, 1, 5);
      const v128_t vxy_hi = wasm_v32x4_shuffle(vx, vy, 2, 6, 3, 7);
      const v128_t vzw_lo = wasm_v32x4_shuffle(vz, vw, 0, 4, 1, 5);
      const v128_t vzw_hi = wasm_v32x4_shuffle(vz, vw, 2, 6, 3, 7);
      const v128_t vxyzw0 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 0, 1, 4, 5);
      const v128_t vxyzw1 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 2, 3, 6, 7);
      const v128_t vxyzw2 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 0, 1, 4, 5);
      const v128_t vxyzw3 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 2, 3, 6, 7);
      wasm_v128_store(o, vxyzw0);
      o = (float*) ((uintptr_t) o + group_increment);
      wasm_v128_store(o, vxyzw1);
      o = (float*) ((uintptr_t) o + group_increment);
      wasm_v128_store(o, vxyzw2);
      o = (float*) ((uintptr_t) o + group_increment);
      wasm_v128_store(o, vxyzw3);
      o = (float*) ((uintptr_t) o + group_increment);
      k -= 4 * sizeof(uint32_t);
    }
    if XNN_UNLIKELY(k != 0) {
      // 2-element tail: 64-bit moves packed into vectors, then reshuffled.
      if (k & (2 * sizeof(uint32_t))) {
        const double vx = *((const double*) x);
        x += 2;
        const double vy = *((const double*) y);
        y += 2;
        const double vz = *((const double*) z);
        z += 2;
        const double vw = *((const double*) w);
        w += 2;
        const v128_t vxy = wasm_f64x2_make(vx, vy);
        const v128_t vzw = wasm_f64x2_make(vz, vw);
        const v128_t vxyzw_lo = wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6);
        const v128_t vxyzw_hi = wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7);
        wasm_v128_store(o, vxyzw_lo);
        o = (float*) ((uintptr_t) o + group_increment);
        wasm_v128_store(o, vxyzw_hi);
        o = (float*) ((uintptr_t) o + group_increment);
      }
      // 1-element tail; only w is advanced because it is the running
      // pointer carried into the next group iteration.
      if (k & (1 * sizeof(uint32_t))) {
        const float vx = *x;
        const float vy = *y;
        const float vz = *z;
        const float vw = *w++;
        o[0] = vx;
        o[1] = vy;
        o[2] = vz;
        o[3] = vw;
        o = (float*) ((uintptr_t) o + group_increment);
      }
    }
    // Rewind to the next group of 4 output columns, clamped for the
    // overlapping final group.
    o = (float*) ((uintptr_t) o + output_increment);
    if (o > last_output) {
      o = last_output;
    }
  }
}
| 3,658
| 30.273504
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-1x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x64_transposec_ukernel__1x2_scalar_float(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));

  // Transposes the block one 1x2 (row x column) tile at a time: the two
  // input columns of the current strip feed two consecutive output rows.
  const size_t tile_height = 1;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Step right by one tile width and back up over the rows just consumed.
  const size_t input_reset = tile_wbytes - block_height * input_stride;
  // Move both output cursors forward to the start of their next row pair.
  const size_t output_reset = tile_width * output_stride - block_height * sizeof(double);
  const size_t input_offset = tile_height * input_stride;

  const double* in_ptr = (const double*) input;
  double* out0 = (double*) output;
  double* out1 = (double*) ((uintptr_t) out0 + output_stride);

  do {
    if XNN_UNPREDICTABLE(block_width < 2) {
      // Single trailing column: alias the second cursor onto the first so the
      // (out-of-range) column-1 store is overwritten by the valid column-0 one.
      out1 = out0;
    }
    size_t rows_remaining = block_height;
    while (rows_remaining != 0) {
      *out1++ = in_ptr[1];
      *out0++ = in_ptr[0];
      in_ptr = (const double*) ((uintptr_t) in_ptr + input_offset);
      rows_remaining -= 1;
    }
    in_ptr = (const double*) ((uintptr_t) in_ptr + input_reset);
    out0 = (double*) ((uintptr_t) out0 + output_reset);
    out1 = (double*) ((uintptr_t) out1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 1,732
| 28.87931
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-1x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x64_transposec_ukernel__1x2_scalar_int(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int64_t));
  assert(input_stride >= block_width * sizeof(int64_t));

  // Transposes the block one 1x2 (row x column) tile at a time: the two
  // input columns of the current strip feed two consecutive output rows.
  const size_t tile_height = 1;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(int64_t);
  // Step right by one tile width and back up over the rows just consumed.
  const size_t input_reset = tile_wbytes - block_height * input_stride;
  // Move both output cursors forward to the start of their next row pair.
  const size_t output_reset = tile_width * output_stride - block_height * sizeof(int64_t);
  const size_t input_offset = tile_height * input_stride;

  const int64_t* in_ptr = (const int64_t*) input;
  int64_t* out0 = (int64_t*) output;
  int64_t* out1 = (int64_t*) ((uintptr_t) out0 + output_stride);

  do {
    if XNN_UNPREDICTABLE(block_width < 2) {
      // Single trailing column: alias the second cursor onto the first so the
      // (out-of-range) column-1 store is overwritten by the valid column-0 one.
      out1 = out0;
    }
    size_t rows_remaining = block_height;
    while (rows_remaining != 0) {
      *out1++ = in_ptr[1];
      *out0++ = in_ptr[0];
      in_ptr = (const int64_t*) ((uintptr_t) in_ptr + input_offset);
      rows_remaining -= 1;
    }
    in_ptr = (const int64_t*) ((uintptr_t) in_ptr + input_reset);
    out0 = (int64_t*) ((uintptr_t) out0 + output_reset);
    out1 = (int64_t*) ((uintptr_t) out1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 1,744
| 29.086207
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x1-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x64_transposec_ukernel__2x1_scalar_float(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));

  // Transposes one input column at a time, consuming two input rows per
  // inner-loop step and emitting them as adjacent output elements.
  const size_t tile_height = 2;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(double);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
  const size_t input_offset = tile_height * input_stride;

  const double* in0 = (const double*) input;
  const double* in1 = (const double*) ((uintptr_t) in0 + input_stride);
  double* out = (double*) output;

  do {
    size_t rows_remaining = block_height;
    while (rows_remaining >= 2) {
      *out++ = in0[0];
      *out++ = in1[0];
      in0 = (const double*) ((uintptr_t) in0 + input_offset);
      in1 = (const double*) ((uintptr_t) in1 + input_offset);
      rows_remaining -= 2;
    }
    if (rows_remaining & 1) {
      // Odd trailing row: copy its element without advancing `out`;
      // output_reset accounts only for the even row count.
      out[0] = in0[0];
    }
    in0 = (const double*) ((uintptr_t) in0 + input_reset);
    in1 = (const double*) ((uintptr_t) in0 + input_stride);
    out = (double*) ((uintptr_t) out + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 1,837
| 30.152542
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x1-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x64_transposec_ukernel__2x1_scalar_int(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int64_t));
  assert(input_stride >= block_width * sizeof(int64_t));

  // Transposes one input column at a time, consuming two input rows per
  // inner-loop step and emitting them as adjacent output elements.
  const size_t tile_height = 2;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(int64_t);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int64_t);
  const size_t input_offset = tile_height * input_stride;

  const int64_t* in0 = (const int64_t*) input;
  const int64_t* in1 = (const int64_t*) ((uintptr_t) in0 + input_stride);
  int64_t* out = (int64_t*) output;

  do {
    size_t rows_remaining = block_height;
    while (rows_remaining >= 2) {
      *out++ = in0[0];
      *out++ = in1[0];
      in0 = (const int64_t*) ((uintptr_t) in0 + input_offset);
      in1 = (const int64_t*) ((uintptr_t) in1 + input_offset);
      rows_remaining -= 2;
    }
    if (rows_remaining & 1) {
      // Odd trailing row: copy its element without advancing `out`;
      // output_reset accounts only for the even row count.
      out[0] = in0[0];
    }
    in0 = (const int64_t*) ((uintptr_t) in0 + input_reset);
    in1 = (const int64_t*) ((uintptr_t) in0 + input_stride);
    out = (int64_t*) ((uintptr_t) out + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 1,850
| 30.372881
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via NEON zip/combine.
// "multi": two separate input row pointers; "dec": one output cursor that is
// advanced to the tile's last output row, then walked backwards by
// `minus_output_stride`.
void xnn_x64_transposec_ukernel__2x2_multi_dec_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Leaves `o` one tile *before* the next strip's start (the inner loop
  // pre-increments by oN_offset, which includes tile_hbytes).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  // Biased down by one tile so each iteration can advance by oN_offset up front.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const uint64x2_t v1_1 = vld1q_u64(i1); i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      // val[0] = (row0[0], row1[0]); val[1] = (row0[1], row1[1]).
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      // AArch32 fallback: build the same pairs from the rows' half-vectors.
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      // Store the second output row first, then step back to the first.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      vst1q_u64(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u64(o, v0_0.val[0]);
    }
    // Undo the pre-increment bias before handling the odd trailing row.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Odd trailing input row: pair it with a zero vector and store only one
      // element (the low lane) per output row.
      const uint64x2_t v1_0 = vld1q_u64(i0);
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        vst1_u64(o, v1_low);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u64(o, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,610
| 33.390476
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-mov-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via SSE2 unpack.
// "multi": two separate input row pointers; "mov": one output cursor moved
// between the tile's output rows with a conditional pointer update.
void xnn_x64_transposec_ukernel__2x2_multi_mov_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Leaves `o` one tile *before* the next strip's start (the inner loop
  // pre-increments by oN_offset, which includes tile_hbytes).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  // Biased down by one tile so each iteration can advance by oN_offset up front.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      // v0_0 = (row0[0], row1[0]); v0_1 = (row0[1], row1[1]).
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      // Store the second output row first, then step back to the first.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      _mm_storeu_si128((__m128i*) o, v0_1);
      uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_0);
    }
    // Undo the pre-increment bias before handling the odd trailing row.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Only the low 64 bits of v0_0/v0_1 (both sourced from v1_0) are stored
      // below, so the undefined second operand never reaches memory.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        _mm_storel_epi64((__m128i*) o, v0_1);
        uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_0);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,297
| 32.313131
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via NEON zip/combine.
// "multi": two separate input row pointers; "mov": one output cursor moved
// between the tile's output rows with a conditional pointer update.
void xnn_x64_transposec_ukernel__2x2_multi_mov_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Leaves `o` one tile *before* the next strip's start (the inner loop
  // pre-increments by oN_offset, which includes tile_hbytes).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  // Biased down by one tile so each iteration can advance by oN_offset up front.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const uint64x2_t v1_1 = vld1q_u64(i1); i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      // val[0] = (row0[0], row1[0]); val[1] = (row0[1], row1[1]).
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      // AArch32 fallback: build the same pairs from the rows' half-vectors.
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      // Store the second output row first, then step back to the first.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      vst1q_u64(o, v0_0.val[1]);
      uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      vst1q_u64(o, v0_0.val[0]);
    }
    // Undo the pre-increment bias before handling the odd trailing row.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Odd trailing input row: pair it with a zero vector and store only one
      // element (the low lane) per output row.
      const uint64x2_t v1_0 = vld1q_u64(i0);
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        vst1_u64(o, v1_low);
        uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        vst1_u64(o, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,662
| 33.233645
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-multi-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via SSE2 unpack.
// "multi/multi": two separate input row pointers AND two separate output row
// pointers (o1 is aliased onto o0 when only one output row remains).
void xnn_x64_transposec_ukernel__2x2_multi_multi_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Moves each output cursor to the start of its next row pair.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  uint64_t* o0 = (uint64_t*) output;
  uint64_t* o1 = (uint64_t*) ((uintptr_t) o0 + output_stride);

  do {
    if XNN_UNPREDICTABLE(block_width < 2) {
      // Single trailing column: the o1 store lands on o0's row and is then
      // overwritten by the valid o0 store.
      o1 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      // v0_0 = (row0[0], row1[0]); v0_1 = (row0[1], row1[1]).
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      _mm_storeu_si128((__m128i*) o1, v0_1);
      o1 = (uint64_t*) ((uintptr_t) o1 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o0, v0_0);
      o0 = (uint64_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Only the low 64 bits of v0_0/v0_1 (both sourced from v1_0) are stored
      // below, so the undefined second operand never reaches memory.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        _mm_storel_epi64((__m128i*) o1, v0_1);
        _mm_storel_epi64((__m128i*) o0, v0_0);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o0 = (uint64_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint64_t*) ((uintptr_t) o1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,904
| 30.923077
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via NEON zip/combine.
// "multi/multi": two separate input row pointers AND two separate output row
// pointers (o1 is aliased onto o0 when only one output row remains).
void xnn_x64_transposec_ukernel__2x2_multi_multi_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Moves each output cursor to the start of its next row pair.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  uint64_t* o0 = (uint64_t*) output;
  uint64_t* o1 = (uint64_t*) ((uintptr_t) o0 + output_stride);

  do {
    if XNN_UNPREDICTABLE(block_width < 2) {
      // Single trailing column: the o1 store lands on o0's row and is then
      // overwritten by the valid o0 store.
      o1 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const uint64x2_t v1_1 = vld1q_u64(i1); i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      // val[0] = (row0[0], row1[0]); val[1] = (row0[1], row1[1]).
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      // AArch32 fallback: build the same pairs from the rows' half-vectors.
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      vst1q_u64(o1, v0_0.val[1]); o1 = (uint64_t*) ((uintptr_t) o1 + tile_hbytes);
      vst1q_u64(o0, v0_0.val[0]); o0 = (uint64_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      // Odd trailing input row: pair it with a zero vector and store only one
      // element (the low lane) per output row.
      const uint64x2_t v1_0 = vld1q_u64(i0);
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        vst1_u64(o1, v1_low);
        vst1_u64(o0, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o0 = (uint64_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint64_t*) ((uintptr_t) o1 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,257
| 32.587629
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-switch-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via SSE2 unpack.
// "multi": two separate input row pointers; "switch": the per-tile output
// rows are dispatched through a switch on `rem` with intentional fallthrough
// (higher rows first, then row 0).
void xnn_x64_transposec_ukernel__2x2_multi_switch_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Moves `o` to the start of the next output row pair.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  uint64_t* o = (uint64_t*) output;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      // v0_0 = (row0[0], row1[0]); v0_1 = (row0[1], row1[1]).
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 1:
          _mm_storeu_si128((__m128i*) oN, v0_1);
          /* fallthrough */
        case 0:
          _mm_storeu_si128((__m128i*) o, v0_0);
          o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Only the low 64 bits of v0_0/v0_1 (both sourced from v1_0) are stored
      // below, so the undefined second operand never reaches memory.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 1:
            _mm_storel_epi64((__m128i*) oN, v0_1);
            /* fallthrough */
          case 0:
            _mm_storel_epi64((__m128i*) o, v0_0);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,163
| 29.718447
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-multi-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via NEON zip/combine.
// "multi": two separate input row pointers; "switch": the per-tile output
// rows are dispatched through a switch on `rem` with intentional fallthrough
// (higher rows first, then row 0).
void xnn_x64_transposec_ukernel__2x2_multi_switch_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Moves `o` to the start of the next output row pair.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);

  const uint64_t* i0 = input;
  const uint64_t* i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
  uint64_t* o = (uint64_t*) output;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_offset);
      const uint64x2_t v1_1 = vld1q_u64(i1); i1 = (uint64_t*) ((uintptr_t) i1 + input_offset);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      // val[0] = (row0[0], row1[0]); val[1] = (row0[1], row1[1]).
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      // AArch32 fallback: build the same pairs from the rows' half-vectors.
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64_t *oN = (uint64_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 1:
          vst1q_u64(oN, v0_0.val[1]);
          /* fallthrough */
        case 0:
          vst1q_u64(o, v0_0.val[0]); o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    if (bh != 0) {
      // Odd trailing input row: pair it with a zero vector and store only one
      // element (the low lane) per output row.
      const uint64x2_t v1_0 = vld1q_u64(i0);
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 1:
            vst1_u64(oN, v1_low);
            /* fallthrough */
          case 0:
            vst1_u64(o, v0_low);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint64_t*) ((uintptr_t) i0 + input_stride);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,518
| 30.990909
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_height x block_width matrix of 64-bit elements from
// `input` (rows `input_stride` bytes apart) to `output` (rows
// `output_stride` bytes apart), working on 2x2 tiles via NEON zip/combine.
// "reuse": a single input pointer is advanced twice per tile instead of
// keeping two row pointers; "dec": one output cursor advanced to the tile's
// last output row, then walked backwards by `minus_output_stride`.
void xnn_x64_transposec_ukernel__2x2_reuse_dec_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));

  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // After a column strip: step input right by one tile width, back up over the
  // rows consumed by the even part of the inner loop.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Leaves `o` one tile *before* the next strip's start (the inner loop
  // pre-increments by oN_offset, which includes tile_hbytes).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;

  const uint64_t* i0 = input;
  // Biased down by one tile so each iteration can advance by oN_offset up front.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    // rem: how many output rows beyond the first this strip produces (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Two consecutive input rows through the single (reused) pointer.
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const uint64x2_t v1_1 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      // val[0] = (row0[0], row1[0]); val[1] = (row0[1], row1[1]).
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      // AArch32 fallback: build the same pairs from the rows' half-vectors.
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      // Store the second output row first, then step back to the first.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      vst1q_u64(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u64(o, v0_0.val[0]);
    }
    // Undo the pre-increment bias before handling the odd trailing row.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      // Odd trailing input row: pair it with a zero vector and store only one
      // element (the low lane) per output row.
      const uint64x2_t v1_0 = vld1q_u64(i0);
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
#if XNN_ARCH_ARM64
      v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
      v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
#else
      v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
      v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
#endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        vst1_u64(o, v1_low);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u64(o, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,418
| 32.519608
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-mov-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using SSE2 unpack instructions. "reuse-mov" output addressing: one
// output pointer is advanced to the last output row of the tile and then
// conditionally stepped back one output row for the remaining column.
// XNN_OOB_READS: the odd-row tail loads a full 16 bytes, so reads may go
// past the logical block; only valid lanes are ever stored.
void xnn_x64_transposec_ukernel__2x2_reuse_mov_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Applied after finishing a two-column strip: move the input right by one
  // tile width and back over the rows consumed by the main loop; move the
  // output past the strip just written (output_reset keeps the -tile_hbytes
  // bias on o, see below).
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;
  const uint64_t* i0 = input;
  // o is pre-biased by -tile_hbytes so that adding oN_offset (= oN_stride +
  // tile_hbytes) per tile lands on the correct store address.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = number of extra output rows (columns of this strip) beyond the
    // first: 0 when only one column remains, else 1.
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave the low/high 64-bit lanes: v0_0 holds column 0,
      // v0_1 holds column 1 of the 2x2 tile.
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      // Store the last column first, then step back one output row for the
      // first column. When block_width <= 1 the step-back is skipped, so the
      // v0_0 store overwrites the (over-written) v0_1 store.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      _mm_storeu_si128((__m128i*) o, v0_1);
      uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_0);
    }
    // Undo the pre-bias before the scalar tail.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Contents of v1_1 are irrelevant: only the low 64 bits (which come
      // from v1_0) of each unpack result are stored below.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        _mm_storel_epi64((__m128i*) o, v0_1);
        uint64_t *oN = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_0);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,105
| 31.354167
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using NEON. On AArch64 the interleave uses vzip1q/vzip2q; on ARMv7
// the same result is built from vget_low/vget_high + vcombine. "reuse-mov"
// output addressing: one output pointer is advanced to the last output row
// of the tile and then conditionally stepped back one output row.
// XNN_OOB_READS: the odd-row tail loads a full 128 bits, so reads may go
// past the logical block; only valid lanes are ever stored.
void xnn_x64_transposec_ukernel__2x2_reuse_mov_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t))
;
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Rewind amounts applied after each two-column strip; output_reset keeps
  // the -tile_hbytes bias on o (see below).
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t) - tile_hbytes;
  const uint64_t* i0 = input;
  // o is pre-biased by -tile_hbytes so that adding oN_offset per tile lands
  // on the correct store address.
  uint64_t* o = (uint64_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;
  do {
    // rem = extra output rows beyond the first for this strip (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const uint64x2_t v1_1 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave lanes: val[0] holds column 0, val[1] holds column 1.
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      // Store the last column first, then step back one output row for the
      // first column; the step-back is skipped when only one column remains.
      o = (uint64_t*) ((uintptr_t) o + oN_offset);
      vst1q_u64(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1q_u64(o, v0_0.val[0]);
    }
    // Undo the pre-bias before the tail.
    o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
    if (bh != 0) {
      const uint64x2_t v1_0 = vld1q_u64(i0);
      // Missing second row is padded with zeros; harmless because only the
      // low 64-bit lane of each result is stored below.
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        o = (uint64_t*) ((uintptr_t) o + oN_stride);
        vst1_u64(o, v1_low);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint64_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_u64(o, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,470
| 32.375
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-multi-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using SSE2 unpack instructions. "reuse-multi" output addressing:
// one persistent output pointer per output row (o0, o1), each advanced by
// tile_hbytes per tile. XNN_OOB_READS: the odd-row tail loads a full
// 16 bytes, so reads may go past the logical block.
void xnn_x64_transposec_ukernel__2x2_reuse_multi_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Rewind amounts applied after each two-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);
  const uint64_t* i0 = input;
  uint64_t* o0 = (uint64_t*) output;
  uint64_t* o1 = (uint64_t*) ((uintptr_t) o0 + output_stride);
  do {
    // With a single remaining column, alias o1 onto o0: the o1 stores are
    // then overwritten by the o0 stores that follow (store order matters).
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave: v0_0 holds column 0, v0_1 holds column 1.
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      _mm_storeu_si128((__m128i*) o1, v0_1);
      o1 = (uint64_t*) ((uintptr_t) o1 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o0, v0_0);
      o0 = (uint64_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Contents irrelevant: only the low 64 bits (from v1_0) of each unpack
      // result are stored below.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        _mm_storel_epi64((__m128i*) o1, v0_1);
        _mm_storel_epi64((__m128i*) o0, v0_0);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint64_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint64_t*) ((uintptr_t) o1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,712
| 29.829545
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using NEON (vzip1q/vzip2q on AArch64, vget/vcombine on ARMv7).
// "reuse-multi" output addressing: one persistent output pointer per output
// row (o0, o1). XNN_OOB_READS: the odd-row tail loads a full 128 bits.
void xnn_x64_transposec_ukernel__2x2_reuse_multi_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Rewind amounts applied after each two-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);
  const uint64_t* i0 = input;
  uint64_t* o0 = (uint64_t*) output;
  uint64_t* o1 = (uint64_t*) ((uintptr_t) o0 + output_stride);
  do {
    // With a single remaining column, alias o1 onto o0: the o1 stores are
    // then overwritten by the o0 stores that follow (store order matters).
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const uint64x2_t v1_1 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave lanes: val[0] holds column 0, val[1] holds column 1.
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      vst1q_u64(o1, v0_0.val[1]); o1 = (uint64_t*) ((uintptr_t) o1 + tile_hbytes);
      vst1q_u64(o0, v0_0.val[0]); o0 = (uint64_t*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      const uint64x2_t v1_0 = vld1q_u64(i0);
      // Missing second row is padded with zeros; harmless because only the
      // low 64-bit lane of each result is stored below.
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        vst1_u64(o1, v1_low);
        vst1_u64(o0, v0_low);
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint64_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint64_t*) ((uintptr_t) o1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,065
| 31.617021
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-switch-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using SSE2 unpack instructions. "reuse-switch" output addressing:
// a switch on the number of remaining extra columns (rem) selects how many
// column stores to perform, with intentional fallthrough from case 1 to
// case 0. XNN_OOB_READS: the odd-row tail loads a full 16 bytes.
void xnn_x64_transposec_ukernel__2x2_reuse_switch_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Rewind amounts applied after each two-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);
  const uint64_t* i0 = input;
  uint64_t* o = (uint64_t*) output;
  do {
    // rem = extra output rows beyond the first for this strip (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v1_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave: v0_0 holds column 0, v0_1 holds column 1.
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 1:
          _mm_storeu_si128((__m128i*) oN, v0_1);
          /* fallthrough */
        case 0:
          _mm_storeu_si128((__m128i*) o, v0_0);
          o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    if (bh != 0) {
      const __m128i v1_0 = _mm_loadu_si128((const __m128i*) i0);
      // Contents irrelevant: only the low 64 bits (from v1_0) of each unpack
      // result are stored below.
      const __m128i v1_1 = _mm_undefined_si128();
      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_1);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_1);
      if (bh & 1) {
        uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 1:
            _mm_storel_epi64((__m128i*) oN, v0_1);
            /* fallthrough */
          case 0:
            _mm_storel_epi64((__m128i*) o, v0_0);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,971
| 28.72
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-reuse-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Transposes a block_width x block_height block of 64-bit elements in 2x2
// tiles using NEON (vzip1q/vzip2q on AArch64, vget/vcombine on ARMv7).
// "reuse-switch" output addressing: a switch on the number of remaining
// extra columns (rem) selects how many column stores to perform, with
// intentional fallthrough from case 1 to case 0. XNN_OOB_READS: the
// odd-row tail loads a full 128 bits.
void xnn_x64_transposec_ukernel__2x2_reuse_switch_zip_neon(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint64_t));
  assert(input_stride >= block_width * sizeof(uint64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_hbytes = tile_height * sizeof(uint64_t);
  const size_t tile_wbytes = tile_width * sizeof(uint64_t);
  // Rewind amounts applied after each two-column strip.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint64_t);
  const uint64_t* i0 = input;
  uint64_t* o = (uint64_t*) output;
  do {
    // rem = extra output rows beyond the first for this strip (0 or 1).
    const size_t rem = min(block_width - 1, 1);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 2; bh -= 2) {
      // Load two adjacent input rows (two elements each).
      const uint64x2_t v1_0 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      const uint64x2_t v1_1 = vld1q_u64(i0); i0 = (uint64_t*) ((uintptr_t) i0 + input_stride);
      // Interleave lanes: val[0] holds column 0, val[1] holds column 1.
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      uint64_t *oN = (uint64_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 1:
          vst1q_u64(oN, v0_0.val[1]);
          /* fallthrough */
        case 0:
          vst1q_u64(o, v0_0.val[0]); o = (uint64_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
    if (bh != 0) {
      const uint64x2_t v1_0 = vld1q_u64(i0);
      // Missing second row is padded with zeros; harmless because only the
      // low 64-bit lane of each result is stored below.
      const uint64x2_t v1_1 = vmovq_n_u64(0);
      uint64x2x2_t v0_0;
      #if XNN_ARCH_ARM64
        v0_0.val[0] = vzip1q_u64(v1_0, v1_1);
        v0_0.val[1] = vzip2q_u64(v1_0, v1_1);
      #else
        v0_0.val[0] = vcombine_u64(vget_low_u64(v1_0), vget_low_u64(v1_1));
        v0_0.val[1] = vcombine_u64(vget_high_u64(v1_0), vget_high_u64(v1_1));
      #endif
      uint64x1_t v0_low = vget_low_u64(v0_0.val[0]);
      uint64x1_t v1_low = vget_low_u64(v0_0.val[1]);
      if (bh & 1) {
        uint64_t* oN = (uint64_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 1:
            vst1_u64(oN, v1_low);
            /* fallthrough */
          case 0:
            vst1_u64(o, v0_low);
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }
    i0 = (const uint64_t*) ((uintptr_t) i0 + input_reset);
    o = (uint64_t*) ((uintptr_t) o + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 3,326
| 30.093458
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 2x2 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as doubles. Walks the input two rows and two columns at
// a time; odd trailing rows are handled after the main loop, odd trailing
// columns via pointer aliasing. XNN_OOB_READS: reads of column 1 may go one
// element past the block when only one column remains; the aliased stores
// guarantee the over-read value never survives in the output.
void xnn_x64_transposec_ukernel__2x2_scalar_float(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
  const size_t input_offset = tile_height * input_stride;
  const double* row0 = (const double*) input;
  const double* row1 = (const double*) ((uintptr_t) row0 + input_stride);
  double* col0 = (double*) output;
  double* col1 = (double*) ((uintptr_t) col0 + output_stride);
  do {
    // Single remaining column: alias col1 onto col0 so its stores are
    // harmlessly overwritten by the col0 stores that follow.
    if XNN_UNPREDICTABLE(block_width < 2) {
      col1 = col0;
    }
    size_t rows_left = block_height;
    while (rows_left >= 2) {
      // Column-1 stores must precede column-0 stores: the pointers alias
      // when block_width < 2 and the col0 values must win.
      col1[0] = row0[1];
      col1[1] = row1[1];
      col1 += 2;
      col0[0] = row0[0];
      col0[1] = row1[0];
      col0 += 2;
      row0 = (const double*) ((uintptr_t) row0 + input_offset);
      row1 = (const double*) ((uintptr_t) row1 + input_offset);
      rows_left -= 2;
    }
    if (rows_left & 1) {
      // One leftover input row.
      col1[0] = row0[1];
      col0[0] = row0[0];
    }
    row0 = (const double*) ((uintptr_t) row0 + input_reset);
    row1 = (const double*) ((uintptr_t) row0 + input_stride);
    col0 = (double*) ((uintptr_t) col0 + output_reset);
    col1 = (double*) ((uintptr_t) col1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,076
| 30
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-2x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 2x2 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as int64_t. Integer twin of the "scalar_float" variant:
// two rows and two columns per iteration, odd rows in a tail step, odd
// trailing columns via pointer aliasing. XNN_OOB_READS: reads of column 1
// may go one element past the block when only one column remains; the
// aliased stores guarantee the over-read value never survives.
void xnn_x64_transposec_ukernel__2x2_scalar_int(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int64_t));
  assert(input_stride >= block_width * sizeof(int64_t));
  const size_t tile_height = 2;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(int64_t);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int64_t);
  const size_t input_offset = tile_height * input_stride;
  const int64_t* row0 = (const int64_t*) input;
  const int64_t* row1 = (const int64_t*) ((uintptr_t) row0 + input_stride);
  int64_t* col0 = (int64_t*) output;
  int64_t* col1 = (int64_t*) ((uintptr_t) col0 + output_stride);
  do {
    // Single remaining column: alias col1 onto col0 so its stores are
    // harmlessly overwritten by the col0 stores that follow.
    if XNN_UNPREDICTABLE(block_width < 2) {
      col1 = col0;
    }
    size_t rows_left = block_height;
    while (rows_left >= 2) {
      // Column-1 stores must precede column-0 stores: the pointers alias
      // when block_width < 2 and the col0 values must win.
      col1[0] = row0[1];
      col1[1] = row1[1];
      col1 += 2;
      col0[0] = row0[0];
      col0[1] = row1[0];
      col0 += 2;
      row0 = (const int64_t*) ((uintptr_t) row0 + input_offset);
      row1 = (const int64_t*) ((uintptr_t) row1 + input_offset);
      rows_left -= 2;
    }
    if (rows_left & 1) {
      // One leftover input row.
      col1[0] = row0[1];
      col0[0] = row0[0];
    }
    row0 = (const int64_t*) ((uintptr_t) row0 + input_reset);
    row1 = (const int64_t*) ((uintptr_t) row0 + input_stride);
    col0 = (int64_t*) ((uintptr_t) col0 + output_reset);
    col1 = (int64_t*) ((uintptr_t) col1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,092
| 30.238806
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x1-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 4x1 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as doubles. Gathers four input rows per iteration into
// one contiguous output row; leftover rows (block_height % 4) are handled
// by the "& 2" / "& 1" tail steps.
void xnn_x64_transposec_ukernel__4x1_scalar_float(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));
  const size_t tile_height = 4;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
  const size_t input_offset = tile_height * input_stride;
  const double* r0 = (const double*) input;
  const double* r1 = (const double*) ((uintptr_t) r0 + input_stride);
  const double* r2 = (const double*) ((uintptr_t) r1 + input_stride);
  const double* r3 = (const double*) ((uintptr_t) r2 + input_stride);
  double* out = (double*) output;
  do {
    size_t rows_left = block_height;
    while (rows_left >= 4) {
      // One element from each of four consecutive input rows becomes four
      // consecutive elements of the output row.
      out[0] = r0[0];
      out[1] = r1[0];
      out[2] = r2[0];
      out[3] = r3[0];
      out += 4;
      r0 = (const double*) ((uintptr_t) r0 + input_offset);
      r1 = (const double*) ((uintptr_t) r1 + input_offset);
      r2 = (const double*) ((uintptr_t) r2 + input_offset);
      r3 = (const double*) ((uintptr_t) r3 + input_offset);
      rows_left -= 4;
    }
    // Tail: up to three leftover rows, consumed pairwise then singly.
    const double* tail = r0;
    if (rows_left & 2) {
      out[0] = r0[0];
      out[1] = r1[0];
      out += 2;
      tail = r2;
    }
    if (rows_left & 1) {
      out[0] = tail[0];
    }
    r0 = (const double*) ((uintptr_t) r0 + input_reset);
    r1 = (const double*) ((uintptr_t) r0 + input_stride);
    r2 = (const double*) ((uintptr_t) r1 + input_stride);
    r3 = (const double*) ((uintptr_t) r2 + input_stride);
    out = (double*) ((uintptr_t) out + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,375
| 31.108108
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x1-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 4x1 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as int64_t. Integer twin of the "scalar_float" variant:
// four input rows gathered into one output row per iteration, leftover rows
// handled by the "& 2" / "& 1" tail steps.
void xnn_x64_transposec_ukernel__4x1_scalar_int(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int64_t));
  assert(input_stride >= block_width * sizeof(int64_t));
  const size_t tile_height = 4;
  const size_t tile_width = 1;
  const size_t tile_wbytes = tile_width * sizeof(int64_t);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int64_t);
  const size_t input_offset = tile_height * input_stride;
  const int64_t* r0 = (const int64_t*) input;
  const int64_t* r1 = (const int64_t*) ((uintptr_t) r0 + input_stride);
  const int64_t* r2 = (const int64_t*) ((uintptr_t) r1 + input_stride);
  const int64_t* r3 = (const int64_t*) ((uintptr_t) r2 + input_stride);
  int64_t* out = (int64_t*) output;
  do {
    size_t rows_left = block_height;
    while (rows_left >= 4) {
      // One element from each of four consecutive input rows becomes four
      // consecutive elements of the output row.
      out[0] = r0[0];
      out[1] = r1[0];
      out[2] = r2[0];
      out[3] = r3[0];
      out += 4;
      r0 = (const int64_t*) ((uintptr_t) r0 + input_offset);
      r1 = (const int64_t*) ((uintptr_t) r1 + input_offset);
      r2 = (const int64_t*) ((uintptr_t) r2 + input_offset);
      r3 = (const int64_t*) ((uintptr_t) r3 + input_offset);
      rows_left -= 4;
    }
    // Tail: up to three leftover rows, consumed pairwise then singly.
    const int64_t* tail = r0;
    if (rows_left & 2) {
      out[0] = r0[0];
      out[1] = r1[0];
      out += 2;
      tail = r2;
    }
    if (rows_left & 1) {
      out[0] = tail[0];
    }
    r0 = (const int64_t*) ((uintptr_t) r0 + input_reset);
    r1 = (const int64_t*) ((uintptr_t) r0 + input_stride);
    r2 = (const int64_t*) ((uintptr_t) r1 + input_stride);
    r3 = (const int64_t*) ((uintptr_t) r2 + input_stride);
    out = (int64_t*) ((uintptr_t) out + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,397
| 31.405405
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x2-scalar-float.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 4x2 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as doubles. Gathers four input rows into two output
// rows (one per input column) per iteration; leftover rows are handled by
// the "& 2" / "& 1" tail steps, a leftover single column via pointer
// aliasing. XNN_OOB_READS: reads of column 1 may go one element past the
// block when only one column remains; the aliased stores guarantee the
// over-read value never survives in the output.
void xnn_x64_transposec_ukernel__4x2_scalar_float(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));
  const size_t tile_height = 4;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);
  const size_t input_offset = tile_height * input_stride;
  const double* r0 = (const double*) input;
  const double* r1 = (const double*) ((uintptr_t) r0 + input_stride);
  const double* r2 = (const double*) ((uintptr_t) r1 + input_stride);
  const double* r3 = (const double*) ((uintptr_t) r2 + input_stride);
  double* c0 = (double*) output;
  double* c1 = (double*) ((uintptr_t) c0 + output_stride);
  do {
    // Single remaining column: alias c1 onto c0 so its stores are
    // harmlessly overwritten by the c0 stores that follow.
    if XNN_UNPREDICTABLE(block_width < 2) {
      c1 = c0;
    }
    size_t rows_left = block_height;
    while (rows_left >= 4) {
      // Column-1 stores must precede column-0 stores: the pointers alias
      // when block_width < 2 and the c0 values must win.
      c1[0] = r0[1];
      c1[1] = r1[1];
      c1[2] = r2[1];
      c1[3] = r3[1];
      c1 += 4;
      c0[0] = r0[0];
      c0[1] = r1[0];
      c0[2] = r2[0];
      c0[3] = r3[0];
      c0 += 4;
      r0 = (const double*) ((uintptr_t) r0 + input_offset);
      r1 = (const double*) ((uintptr_t) r1 + input_offset);
      r2 = (const double*) ((uintptr_t) r2 + input_offset);
      r3 = (const double*) ((uintptr_t) r3 + input_offset);
      rows_left -= 4;
    }
    // Tail: up to three leftover rows, consumed pairwise then singly.
    const double* tail = r0;
    if (rows_left & 2) {
      c1[0] = r0[1];
      c1[1] = r1[1];
      c1 += 2;
      c0[0] = r0[0];
      c0[1] = r1[0];
      c0 += 2;
      tail = r2;
    }
    if (rows_left & 1) {
      c1[0] = tail[1];
      c0[0] = tail[0];
    }
    r0 = (const double*) ((uintptr_t) r0 + input_reset);
    r1 = (const double*) ((uintptr_t) r0 + input_stride);
    r2 = (const double*) ((uintptr_t) r1 + input_stride);
    r3 = (const double*) ((uintptr_t) r2 + input_stride);
    c0 = (double*) ((uintptr_t) c0 + output_reset);
    c1 = (double*) ((uintptr_t) c1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,712
| 30.183908
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x2-scalar-int.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
// Scalar 4x2 transpose of a block_width x block_height matrix of 64-bit
// elements, handled as int64_t. Integer twin of the "scalar_float" variant:
// four input rows into two output rows per iteration, leftover rows handled
// by the "& 2" / "& 1" tail steps, a leftover single column via pointer
// aliasing. XNN_OOB_READS: reads of column 1 may go one element past the
// block when only one column remains; the aliased stores guarantee the
// over-read value never survives in the output.
void xnn_x64_transposec_ukernel__4x2_scalar_int(
    const uint64_t *input,
    uint64_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params* params) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(int64_t));
  assert(input_stride >= block_width * sizeof(int64_t));
  const size_t tile_height = 4;
  const size_t tile_width = 2;
  const size_t tile_wbytes = tile_width * sizeof(int64_t);
  // Per-strip rewind amounts for the input and output cursors.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int64_t);
  const size_t input_offset = tile_height * input_stride;
  const int64_t* r0 = (const int64_t*) input;
  const int64_t* r1 = (const int64_t*) ((uintptr_t) r0 + input_stride);
  const int64_t* r2 = (const int64_t*) ((uintptr_t) r1 + input_stride);
  const int64_t* r3 = (const int64_t*) ((uintptr_t) r2 + input_stride);
  int64_t* c0 = (int64_t*) output;
  int64_t* c1 = (int64_t*) ((uintptr_t) c0 + output_stride);
  do {
    // Single remaining column: alias c1 onto c0 so its stores are
    // harmlessly overwritten by the c0 stores that follow.
    if XNN_UNPREDICTABLE(block_width < 2) {
      c1 = c0;
    }
    size_t rows_left = block_height;
    while (rows_left >= 4) {
      // Column-1 stores must precede column-0 stores: the pointers alias
      // when block_width < 2 and the c0 values must win.
      c1[0] = r0[1];
      c1[1] = r1[1];
      c1[2] = r2[1];
      c1[3] = r3[1];
      c1 += 4;
      c0[0] = r0[0];
      c0[1] = r1[0];
      c0[2] = r2[0];
      c0[3] = r3[0];
      c0 += 4;
      r0 = (const int64_t*) ((uintptr_t) r0 + input_offset);
      r1 = (const int64_t*) ((uintptr_t) r1 + input_offset);
      r2 = (const int64_t*) ((uintptr_t) r2 + input_offset);
      r3 = (const int64_t*) ((uintptr_t) r3 + input_offset);
      rows_left -= 4;
    }
    // Tail: up to three leftover rows, consumed pairwise then singly.
    const int64_t* tail = r0;
    if (rows_left & 2) {
      c1[0] = r0[1];
      c1[1] = r1[1];
      c1 += 2;
      c0[0] = r0[0];
      c0[1] = r1[0];
      c0 += 2;
      tail = r2;
    }
    if (rows_left & 1) {
      c1[0] = tail[1];
      c0[0] = tail[0];
    }
    r0 = (const int64_t*) ((uintptr_t) r0 + input_reset);
    r1 = (const int64_t*) ((uintptr_t) r0 + input_stride);
    r2 = (const int64_t*) ((uintptr_t) r1 + input_stride);
    r3 = (const int64_t*) ((uintptr_t) r2 + input_stride);
    c0 = (int64_t*) ((uintptr_t) c0 + output_reset);
    c1 = (int64_t*) ((uintptr_t) c1 + output_reset);
    // doz = saturating (difference-or-zero) subtraction.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 2,737
| 30.471264
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-multi-mov-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 64-bit elements in
// 4x4 AVX tiles. "mov" variant: a single output pointer `o` is stepped
// backwards across the (up to 4) output rows of each tile column via
// minus_output_stride, instead of keeping one pointer per output row.
//
//   input         - top-left element of the source block
//   output        - top-left element of the destination block
//   input_stride  - bytes between consecutive source rows (>= block_width elements)
//   output_stride - bytes between consecutive destination rows (>= block_height elements)
//   params        - holds the AVX mask table used for partial-tile loads
//
// NOTE(fix): `&params` had been corrupted to the mojibake `¶ms` in the
// vmask load below; restored so the file compiles again.
void xnn_x64_transposec_ukernel__4x4_multi_mov_avx(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));

  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(double);
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Rewinds the input pointers from the bottom of a tile column back to the
  // top of the next column.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Advances `o` past the tile_width output rows just written; the trailing
  // -tile_hbytes keeps `o` biased (it is pre-advanced by oN_offset below).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double) - tile_hbytes;

  const double* i0 = (const double*) input;
  const double* i1 = (const double*) ((uintptr_t) i0 + input_stride);
  const double* i2 = (const double*) ((uintptr_t) i1 + input_stride);
  const double* i3 = (const double*) ((uintptr_t) i2 + input_stride);
  double* o = (double*) ((uintptr_t) output - tile_hbytes);  // biased by -tile_hbytes
  const size_t minus_output_stride = -output_stride;

  do {
    // rem = number of valid output rows in this tile column, minus one (0..3).
    const size_t rem = min(block_width - 1, 3);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    // Mask off input lanes beyond block_width (partial rightmost tile).
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
      i0 = (double*) ((uintptr_t) i0 + input_offset);
      const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
      i1 = (double*) ((uintptr_t) i1 + input_offset);
      const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
      i2 = (double*) ((uintptr_t) i2 + input_offset);
      const __m256d v2_3 = _mm256_maskload_pd(i3, vmask);
      i3 = (double*) ((uintptr_t) i3 + input_offset);
      // 4x4 transpose: interleave 64-bit pairs, then exchange 128-bit lanes.
      const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
      const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
      const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
      const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
      const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
      const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
      const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
      const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
      // Store rows rem..0, stepping `o` backwards one output row at a time;
      // rows beyond block_width collapse onto the last valid row.
      o = (double*) ((uintptr_t) o + oN_offset);
      _mm256_storeu_pd(o, v0_3);
      double *oN = (double*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      _mm256_storeu_pd(o, v0_2);
      oN = (double*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      _mm256_storeu_pd(o, v0_1);
      oN = (double*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm256_storeu_pd(o, v0_0);
    }
    o = (double*) ((uintptr_t) o + tile_hbytes);  // undo the bias for the remainder path
    if (bh != 0) {
      // Remainder: 1-3 leftover input rows; duplicate i0 for the missing rows
      // so the loads stay in bounds.
      const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
      const __m256d v2_3 = _mm256_undefined_pd();
      const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
      const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
      const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
      const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
      __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
      __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
      __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
      __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
      __m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
      __m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
      __m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
      __m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
      if (bh & 2) {
        // Write two elements per output row, then keep the upper halves for
        // the (bh & 1) case.
        o = (double*) ((uintptr_t) o + oN_stride);
        _mm_storeu_pd(o, v0_3_lo);
        v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
        double *oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        _mm_storeu_pd(o, v0_2_lo);
        v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
        oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        _mm_storeu_pd(o, v0_1_lo);
        v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
        oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storeu_pd(o, v0_0_lo);
        v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
        o += 2;
      }
      if (bh & 1) {
        // Write the final single element per output row.
        o = (double*) ((uintptr_t) o + oN_stride);
        _mm_storel_pd(o, v0_3_lo);
        double *oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        _mm_storel_pd(o, v0_2_lo);
        oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        _mm_storel_pd(o, v0_1_lo);
        oN = (double*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_pd(o, v0_0_lo);
      }
    }
    // Advance to the next column of 4x4 tiles.
    i0 = (const double*) ((uintptr_t) i0 + input_reset);
    i1 = (const double*) ((uintptr_t) i0 + input_stride);
    i2 = (const double*) ((uintptr_t) i1 + input_stride);
    i3 = (const double*) ((uintptr_t) i2 + input_stride);
    o = (double*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 6,631
| 36.897143
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/x64-transposec/gen/x64-transposec-4x4-multi-multi-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
// Transposes a block_height x block_width matrix of 64-bit elements in
// 4x4 AVX tiles. "multi" output variant: four output pointers o0..o3 are
// kept, one per destination row of the current tile column; extra pointers
// alias o0 when block_width < 4 so their stores are harmless duplicates.
//
//   input         - top-left element of the source block
//   output        - top-left element of the destination block
//   input_stride  - bytes between consecutive source rows (>= block_width elements)
//   output_stride - bytes between consecutive destination rows (>= block_height elements)
//   params        - holds the AVX mask table used for partial-tile loads
//
// NOTE(fix): `&params` had been corrupted to the mojibake `¶ms` in the
// vmask load below; restored so the file compiles again.
void xnn_x64_transposec_ukernel__4x4_multi_multi_avx(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(double));
  assert(input_stride >= block_width * sizeof(double));

  const size_t tile_height = 4;
  const size_t tile_width = 4;
  const size_t tile_hbytes = tile_height * sizeof(double);
  const size_t tile_wbytes = tile_width * sizeof(double);
  // Rewinds the input pointers from the bottom of a tile column back to the
  // top of the next column.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  // Advances o0 past the tile_width output rows just written.
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(double);

  const double* i0 = (const double*) input;
  const double* i1 = (const double*) ((uintptr_t) i0 + input_stride);
  const double* i2 = (const double*) ((uintptr_t) i1 + input_stride);
  const double* i3 = (const double*) ((uintptr_t) i2 + input_stride);
  double* o0 = (double*) output;

  do {
    // Output rows beyond block_width alias o0 (their stores are overwritten).
    double* o1 = (double*) (block_width < 2 ? o0 : (double*) ((uintptr_t) o0 + output_stride));
    double* o2 = (double*) (block_width <= 2 ? o0 : (double*) ((uintptr_t) o1 + output_stride));
    double* o3 = (double*) (block_width < 4 ? o0 : (double*) ((uintptr_t) o2 + output_stride));
    const size_t rem = min(block_width - 1, 3);
    // Mask off input lanes beyond block_width (partial rightmost tile).
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[rem ^ 3]));
    size_t bh = block_height;
    for (; bh >= 4; bh -= 4) {
      const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
      i0 = (double*) ((uintptr_t) i0 + input_offset);
      const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
      i1 = (double*) ((uintptr_t) i1 + input_offset);
      const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
      i2 = (double*) ((uintptr_t) i2 + input_offset);
      const __m256d v2_3 = _mm256_maskload_pd(i3, vmask);
      i3 = (double*) ((uintptr_t) i3 + input_offset);
      // 4x4 transpose: interleave 64-bit pairs, then exchange 128-bit lanes.
      const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
      const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
      const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
      const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
      const __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
      const __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
      const __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
      const __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
      _mm256_storeu_pd(o3, v0_3);
      o3 = (double*) ((uintptr_t) o3 + tile_hbytes);
      _mm256_storeu_pd(o2, v0_2);
      o2 = (double*) ((uintptr_t) o2 + tile_hbytes);
      _mm256_storeu_pd(o1, v0_1);
      o1 = (double*) ((uintptr_t) o1 + tile_hbytes);
      _mm256_storeu_pd(o0, v0_0);
      o0 = (double*) ((uintptr_t) o0 + tile_hbytes);
    }
    if (bh != 0) {
      // Remainder: 1-3 leftover input rows; duplicate i0 for the missing rows
      // so the loads stay in bounds.
      const __m256d v2_0 = _mm256_maskload_pd(i0, vmask);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m256d v2_1 = _mm256_maskload_pd(i1, vmask);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m256d v2_2 = _mm256_maskload_pd(i2, vmask);
      const __m256d v2_3 = _mm256_undefined_pd();
      const __m256d v1_0 = _mm256_unpacklo_pd(v2_0, v2_1);
      const __m256d v1_1 = _mm256_unpackhi_pd(v2_0, v2_1);
      const __m256d v1_2 = _mm256_unpacklo_pd(v2_2, v2_3);
      const __m256d v1_3 = _mm256_unpackhi_pd(v2_2, v2_3);
      __m256d v0_0 = _mm256_insertf128_pd(v1_0, _mm256_castpd256_pd128(v1_2), 1);
      __m256d v0_2 = _mm256_permute2f128_pd(v1_0, v1_2, 0x31);
      __m256d v0_1 = _mm256_insertf128_pd(v1_1, _mm256_castpd256_pd128(v1_3), 1);
      __m256d v0_3 = _mm256_permute2f128_pd(v1_1, v1_3, 0x31);
      __m128d v0_0_lo = _mm256_castpd256_pd128(v0_0);
      __m128d v0_1_lo = _mm256_castpd256_pd128(v0_1);
      __m128d v0_2_lo = _mm256_castpd256_pd128(v0_2);
      __m128d v0_3_lo = _mm256_castpd256_pd128(v0_3);
      if (bh & 2) {
        // Write two elements per output row, then keep the upper halves for
        // the (bh & 1) case.
        _mm_storeu_pd(o3, v0_3_lo);
        v0_3_lo = _mm256_extractf128_pd(v0_3, 1);
        o3 += 2;
        _mm_storeu_pd(o2, v0_2_lo);
        v0_2_lo = _mm256_extractf128_pd(v0_2, 1);
        o2 += 2;
        _mm_storeu_pd(o1, v0_1_lo);
        v0_1_lo = _mm256_extractf128_pd(v0_1, 1);
        o1 += 2;
        _mm_storeu_pd(o0, v0_0_lo);
        v0_0_lo = _mm256_extractf128_pd(v0_0, 1);
        o0 += 2;
      }
      if (bh & 1) {
        // Write the final single element per output row.
        _mm_storel_pd(o3, v0_3_lo);
        _mm_storel_pd(o2, v0_2_lo);
        _mm_storel_pd(o1, v0_1_lo);
        _mm_storel_pd(o0, v0_0_lo);
      }
    }
    // Advance to the next column of 4x4 tiles.
    i0 = (const double*) ((uintptr_t) i0 + input_reset);
    i1 = (const double*) ((uintptr_t) i0 + input_stride);
    i2 = (const double*) ((uintptr_t) i1 + input_stride);
    i3 = (const double*) ((uintptr_t) i2 + input_stride);
    o0 = (double*) ((uintptr_t) o0 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
| 5,557
| 38.140845
| 108
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.