| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x16-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
uint16x8x4_t vtmp4567x01234567;
uint16x8x4_t vtmp89ABx01234567;
uint16x8x4_t vtmpCDEFx01234567;
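// Each vld4q_lane_u16 below de-interleaves 4 consecutive u16 weights from one
// row pointer into lane i of the four vectors of a uint16x8x4_t, so after all
// rows are loaded, val[j] holds element k+j of every row. Storing val[0..3]
// then emits four k-slices of 8 rows each: the packed GOI layout. In the full
// 8-row path every lane is loaded before the stores; in the NC remainder
// below, lane 7 is never loaded and its stored contents are padding.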
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
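// Warm the first two 64-byte cache lines of each row before entering the
// k loop; the +128 prefetches inside the loop then run ahead of the
// streaming loads.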
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
// KC main loop multiple of 16
size_t k = kc;
for (; k >= 16; k -= 16) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w7, vtmp89ABx01234567, 7); w7 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w0, vtmpCDEFx01234567, 0); w0 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w1, vtmpCDEFx01234567, 1); w1 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w2, vtmpCDEFx01234567, 2); w2 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w3, vtmpCDEFx01234567, 3); w3 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w4, vtmpCDEFx01234567, 4); w4 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w5, vtmpCDEFx01234567, 5); w5 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w6, vtmpCDEFx01234567, 6); w6 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w7, vtmpCDEFx01234567, 7); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
while (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
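// Pointers for the absent rows are clamped to the previous valid row so every
// lane load stays in bounds; the duplicated (and, for lane 7, unwritten)
// output lanes are padding — the consuming GEMM computes with them but never
// stores those columns.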
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 16
size_t k = kc;
for (; k >= 16; k -= 16) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w0, vtmpCDEFx01234567, 0); w0 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w1, vtmpCDEFx01234567, 1); w1 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w2, vtmpCDEFx01234567, 2); w2 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w3, vtmpCDEFx01234567, 3); w3 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w4, vtmpCDEFx01234567, 4); w4 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w5, vtmpCDEFx01234567, 5); w5 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w6, vtmpCDEFx01234567, 6); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
while (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
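
For orientation, here is a minimal scalar sketch of the GOI layout the kernel above produces, assuming nr == 8, kr == 1, sr == 1. The function name is hypothetical and not part of XNNPACK; it zero-fills the padding lanes past nc that the NEON kernel leaves unspecified.

#include <stddef.h>
#include <stdint.h>

static void x16_packw_goi_reference(
    size_t g, size_t nc, size_t kc,
    const uint16_t* weights,        // [g][nc][kc], row-major
    const uint16_t* bias,           // [g][nc], may be NULL
    uint16_t* packed_weights,
    size_t extra_bytes)
{
  do {
    for (size_t n = 0; n < nc; n += 8) {
      const size_t nb = (nc - n < 8) ? (nc - n) : 8;  // valid rows in block
      // 8 bias slots, zero-padded past nc
      for (size_t i = 0; i < 8; i++) {
        *packed_weights++ = (bias != NULL && i < nb) ? bias[n + i] : 0;
      }
      // kc slices, each holding element k of all 8 rows
      for (size_t k = 0; k < kc; k++) {
        for (size_t i = 0; i < 8; i++) {
          *packed_weights++ = (i < nb) ? weights[(n + i) * kc + k] : 0;
        }
      }
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
    }
    weights += nc * kc;
    if (bias != NULL) {
      bias += nc;
    }
  } while (--g != 0);
}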
| 20,552 | 47.819477 | 84 | c |
XNNPACK | XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x16.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
uint16x8x4_t vtmp4567x01234567;
uint16x8x4_t vtmp89ABx01234567;
uint16x8x4_t vtmpCDEFx01234567;
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
// KC main loop multiple of 16
size_t k = kc;
for (; k >= 16; k -= 16) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w7, vtmp89ABx01234567, 7); w7 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w0, vtmpCDEFx01234567, 0); w0 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w1, vtmpCDEFx01234567, 1); w1 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w2, vtmpCDEFx01234567, 2); w2 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w3, vtmpCDEFx01234567, 3); w3 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w4, vtmpCDEFx01234567, 4); w4 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w5, vtmpCDEFx01234567, 5); w5 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w6, vtmpCDEFx01234567, 6); w6 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w7, vtmpCDEFx01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
while (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 16
size_t k = kc;
for (; k >= 16; k -= 16) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w0, vtmpCDEFx01234567, 0); w0 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w1, vtmpCDEFx01234567, 1); w1 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w2, vtmpCDEFx01234567, 2); w2 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w3, vtmpCDEFx01234567, 3); w3 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w4, vtmpCDEFx01234567, 4); w4 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w5, vtmpCDEFx01234567, 5); w5 += 4;
vtmpCDEFx01234567 = vld4q_lane_u16(w6, vtmpCDEFx01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmpCDEFx01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
while (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
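
A hypothetical call site for the variant above, sketching one plausible way to size the packed buffer (assumes extra_bytes == 0; an illustration, not an XNNPACK API example):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack/packw.h>

void pack_weights_example(size_t g, size_t nc, size_t kc,
                          const uint16_t* weights, const uint16_t* bias) {
  // nc rounds up to nr == 8; each block stores 8 bias slots + 8 * kc weights.
  const size_t rounded_nc = (nc + 7) & ~(size_t) 7;
  uint16_t* packed = malloc(g * rounded_nc * (kc + 1) * sizeof(uint16_t));
  xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16(
      g, nc, kc, /*nr=*/8, /*kr=*/1, /*sr=*/1,
      weights, bias, packed, /*extra_bytes=*/0, /*params=*/NULL);
  // ... hand `packed` to the matching 8-wide GEMM microkernel ...
  free(packed);
}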
| 18,931 | 47.66838 | 84 | c |
XNNPACK | XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x4-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 12,913 | 39.610063 | 84 | c |
XNNPACK | XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
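
The generated variants differ only in KC unrolling (x16, x8, x4) and optional prefetching. A hypothetical selection heuristic, purely illustrative and not XNNPACK's actual dispatch:

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/packw.h>

typedef void (*x16_packw_fn)(
    size_t, size_t, size_t, size_t, size_t, size_t,
    const uint16_t*, const uint16_t*, uint16_t*, size_t, const void*);

// Illustrative heuristic: deep k loops amortize the x16 unroll, while a
// small kc runs mostly in the remainder paths, where the x4 variant is
// leaner.
static x16_packw_fn select_x16_packw(size_t kc) {
  return (kc >= 16) ? xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16
                    : xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4;
}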
| 11,292 | 38.486014 | 84 | c |
XNNPACK | XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x8-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
uint16x8x4_t vtmp4567x01234567;
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has less than 8 rows so last row is not loaded
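// Row pointers for the missing channels alias the preceding row, so every
// lane load below stays in bounds; the duplicated values land only in
// padding lanes of the packed output.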
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| file_length: 16,789 | avg_line_length: 44.013405 | max_line_length: 84 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x8.c |
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
uint16x8x4_t vtmp4567x01234567;
do {
// NC main loop multiple of 8
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has less than 8 rows so last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 8x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
break;
}
// KC remainder of 8x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
break;
}
// KC remainder of 8x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| file_length: 15,168 | avg_line_length: 43.483871 | max_line_length: 84 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x16_packw_gemm_goi_ukernel_x8__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16_t* out = (uint16_t*) packed_weights;
const uint16_t* b = (const uint16_t*) bias;
do {
// NC main loop multiple of 8
const uint16_t* w0 = (const uint16_t*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
out[4] = b[4];
out[5] = b[5];
out[6] = b[6];
out[7] = b[7];
b += 8;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
out[4] = 0;
out[5] = 0;
out[6] = 0;
out[7] = 0;
}
out += 8;
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
// KC main loop multiple of 8x4
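// Packing layout: each iteration reads elements 0..3 from all 8 rows and
// writes out[k*8 + i] = row i, element k, so every group of 8 consecutive
// outputs is one k-slice across the 8 output channels.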
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint16_t v00 = w0[0];
const uint16_t v01 = w0[1];
const uint16_t v02 = w0[2];
const uint16_t v03 = w0[3];
w0 += 4;
const uint16_t v10 = w1[0];
const uint16_t v11 = w1[1];
const uint16_t v12 = w1[2];
const uint16_t v13 = w1[3];
w1 += 4;
const uint16_t v20 = w2[0];
const uint16_t v21 = w2[1];
const uint16_t v22 = w2[2];
const uint16_t v23 = w2[3];
w2 += 4;
const uint16_t v30 = w3[0];
const uint16_t v31 = w3[1];
const uint16_t v32 = w3[2];
const uint16_t v33 = w3[3];
w3 += 4;
const uint16_t v40 = w4[0];
const uint16_t v41 = w4[1];
const uint16_t v42 = w4[2];
const uint16_t v43 = w4[3];
w4 += 4;
const uint16_t v50 = w5[0];
const uint16_t v51 = w5[1];
const uint16_t v52 = w5[2];
const uint16_t v53 = w5[3];
w5 += 4;
const uint16_t v60 = w6[0];
const uint16_t v61 = w6[1];
const uint16_t v62 = w6[2];
const uint16_t v63 = w6[3];
w6 += 4;
const uint16_t v70 = w7[0];
const uint16_t v71 = w7[1];
const uint16_t v72 = w7[2];
const uint16_t v73 = w7[3];
w7 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[15] = v71;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[23] = v72;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out[31] = v73;
out += 32;
}
// KC remainder
for (; k != 0; --k) {
const uint16_t v0 = *w0++;
out[0] = v0;
const uint16_t v1 = *w1++;
out[1] = v1;
const uint16_t v2 = *w2++;
out[2] = v2;
const uint16_t v3 = *w3++;
out[3] = v3;
const uint16_t v4 = *w4++;
out[4] = v4;
const uint16_t v5 = *w5++;
out[5] = v5;
const uint16_t v6 = *w6++;
out[6] = v6;
const uint16_t v7 = *w7++;
out[7] = v7;
out += 8;
}
out = (uint16_t*) ((uintptr_t) out + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (8 - n);
// NR remainder has less than 8 rows so last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
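// As in the main path, but only the first n rows carry unique data (the
// clamped pointers make the rest alias earlier rows) and slot 7 of each
// 8-element group is left unwritten as padding for the absent channel.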
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint16_t v00 = w0[0];
const uint16_t v01 = w0[1];
const uint16_t v02 = w0[2];
const uint16_t v03 = w0[3];
w0 += 4;
const uint16_t v10 = w1[0];
const uint16_t v11 = w1[1];
const uint16_t v12 = w1[2];
const uint16_t v13 = w1[3];
w1 += 4;
const uint16_t v20 = w2[0];
const uint16_t v21 = w2[1];
const uint16_t v22 = w2[2];
const uint16_t v23 = w2[3];
w2 += 4;
const uint16_t v30 = w3[0];
const uint16_t v31 = w3[1];
const uint16_t v32 = w3[2];
const uint16_t v33 = w3[3];
w3 += 4;
const uint16_t v40 = w4[0];
const uint16_t v41 = w4[1];
const uint16_t v42 = w4[2];
const uint16_t v43 = w4[3];
w4 += 4;
const uint16_t v50 = w5[0];
const uint16_t v51 = w5[1];
const uint16_t v52 = w5[2];
const uint16_t v53 = w5[3];
w5 += 4;
const uint16_t v60 = w6[0];
const uint16_t v61 = w6[1];
const uint16_t v62 = w6[2];
const uint16_t v63 = w6[3];
w6 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out += 32;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint16_t v0 = *w0++;
out[0] = v0;
const uint16_t v1 = *w1++;
out[1] = v1;
const uint16_t v2 = *w2++;
out[2] = v2;
const uint16_t v3 = *w3++;
out[3] = v3;
const uint16_t v4 = *w4++;
out[4] = v4;
const uint16_t v5 = *w5++;
out[5] = v5;
const uint16_t v6 = *w6++;
out[6] = v6;
out += 8;
}
out = (uint16_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
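// Illustrative usage sketch (an assumption for exposition, not part of the
// generated sources): packing a 10x20 uint16 GOI weight matrix with bias.
// The packed size follows from the loops above: ceil(nc/nr) = 2 blocks,
// each holding nr bias values plus nr*kc weights = 8 + 160 uint16_t.
//
//   uint16_t w[10 * 20], b[10], packed[2 * (8 + 8 * 20)];
//   xnn_x16_packw_gemm_goi_ukernel_x8__scalar_int_x4(
//       1, 10, 20, 8, 1, 1, w, b, packed, 0, NULL);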
| file_length: 7,913 | avg_line_length: 24.12381 | max_line_length: 72 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/x16-transposec-4x8-sse2.c |
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__4x8_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const size_t input_offset = tile_height * input_stride;
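// Reset arithmetic: input_reset returns i0 to the top row shifted one tile
// width (8 u16) to the right, and output_reset drops the o pointers down 8
// output rows while rewinding the elements written along the current rows.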
const uint16_t* i0 = (const uint16_t*) input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 4) {
o4 = o0;
}
if XNN_UNPREDICTABLE(block_width < 6) {
o5 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 6) {
o6 = o0;
}
if XNN_UNPREDICTABLE(block_width < 8) {
o7 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
__m128i v0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
__m128i v1 = _mm_loadu_si128((const __m128i*) i1);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
__m128i v2 = _mm_loadu_si128((const __m128i*) i2);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
__m128i v3 = _mm_loadu_si128((const __m128i*) i3);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
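// Two rounds of 16-bit unpacks transpose the 4x8 tile: after the second
// round v0 holds transposed rows 0-1 in its low/high 64-bit halves, v2
// rows 2-3, v1 rows 4-5 and v3 rows 6-7, matching the storel/storeh pairs.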
__m128i vtmp0 = _mm_unpacklo_epi16(v0, v2);
__m128i vtmp1 = _mm_unpacklo_epi16(v1, v3);
__m128i vtmp2 = _mm_unpackhi_epi16(v0, v2);
__m128i vtmp3 = _mm_unpackhi_epi16(v1, v3);
v0 = _mm_unpacklo_epi16(vtmp0, vtmp1);
v1 = _mm_unpacklo_epi16(vtmp2, vtmp3);
v2 = _mm_unpackhi_epi16(vtmp0, vtmp1);
v3 = _mm_unpackhi_epi16(vtmp2, vtmp3);
_mm_storeh_pi((__m64*) o7, _mm_castsi128_ps(v3));
o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
_mm_storel_epi64((__m128i*) o6, v3);
o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
_mm_storeh_pi((__m64*) o5, _mm_castsi128_ps(v1));
o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
_mm_storel_epi64((__m128i*) o4, v1);
o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
_mm_storeh_pi((__m64*) o3, _mm_castsi128_ps(v2));
o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
_mm_storel_epi64((__m128i*) o2, v2);
o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
_mm_storeh_pi((__m64*) o1, _mm_castsi128_ps(v0));
o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
_mm_storel_epi64((__m128i*) o0, v0);
o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
__m128i v0 = _mm_loadu_si128((const __m128i*) i0);
__m128i v1 = _mm_loadu_si128((const __m128i*) i1);
__m128i v2 = _mm_loadu_si128((const __m128i*) i2);
__m128i vtmp0 = _mm_unpacklo_epi16(v0, v2);
__m128i vtmp1 = _mm_unpacklo_epi16(v1, v1);
__m128i vtmp2 = _mm_unpackhi_epi16(v0, v2);
__m128i vtmp3 = _mm_unpackhi_epi16(v1, v1);
v0 = _mm_unpacklo_epi16(vtmp0, vtmp1);
v1 = _mm_unpacklo_epi16(vtmp2, vtmp3);
v2 = _mm_unpackhi_epi16(vtmp0, vtmp1);
__m128i v3 = _mm_unpackhi_epi16(vtmp2, vtmp3);
if (bh & 2) {
unaligned_store_u32(o7, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(v3, 0xE)));
o7 += 2;
unaligned_store_u32(o6, (uint32_t) _mm_cvtsi128_si32(v3));
o6 += 2;
unaligned_store_u32(o5, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(v1, 0xE)));
o5 += 2;
unaligned_store_u32(o4, (uint32_t) _mm_cvtsi128_si32(v1));
o4 += 2;
unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(v2, 0xE)));
o3 += 2;
unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v2));
o2 += 2;
unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(v0, 0xE)));
o1 += 2;
unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v0));
o0 += 2;
v0 = _mm_srli_epi64(v0, 32);
v1 = _mm_srli_epi64(v1, 32);
v2 = _mm_srli_epi64(v2, 32);
v3 = _mm_srli_epi64(v3, 32);
}
if (bh & 1) {
*o7 = (uint16_t) _mm_extract_epi16(v3, 4);
*o6 = (uint16_t) _mm_cvtsi128_si32(v3);
*o5 = (uint16_t) _mm_extract_epi16(v1, 4);
*o4 = (uint16_t) _mm_cvtsi128_si32(v1);
*o3 = (uint16_t) _mm_extract_epi16(v2, 4);
*o2 = (uint16_t) _mm_cvtsi128_si32(v2);
*o1 = (uint16_t) _mm_extract_epi16(v0, 4);
*o0 = (uint16_t) _mm_cvtsi128_si32(v0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 6,969 | avg_line_length: 37.722222 | max_line_length: 110 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-1x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__1x2_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 1;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - block_height * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
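// Illustrative call (an assumption for exposition): transpose a contiguous
// 3x2 uint16 block into a contiguous 2x3 block.
//
//   uint16_t in[3 * 2], out[2 * 3];
//   xnn_x16_transposec_ukernel__1x2_scalar_int(
//       in, out, 2 * sizeof(uint16_t), 3 * sizeof(uint16_t), 2, 3, NULL);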
| file_length: 1,744 | avg_line_length: 29.086207 | max_line_length: 90 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-1x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__1x4_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 1;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - block_height * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
int16_t* o2 = (int16_t*) ((uintptr_t) o1 + output_stride);
int16_t* o3 = (int16_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
*o3++ = i0[3];
*o2++ = i0[2];
*o1++ = i0[1];
*o0++ = i0[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
o2 = (int16_t*) ((uintptr_t) o2 + output_reset);
o3 = (int16_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 2,145 | avg_line_length: 29.657143 | max_line_length: 90 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-2x1-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__2x1_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 2;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
int16_t* o0 = (int16_t*) output;
do {
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o0[0] = i0[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 1,850 | avg_line_length: 30.372881 | max_line_length: 109 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-2x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__2x2_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o1++ = i0[1];
*o1++ = i1[1];
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o1[0] = i0[1];
o0[0] = i0[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 2,092 | avg_line_length: 30.238806 | max_line_length: 109 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-2x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__2x4_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 2;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
int16_t* o2 = (int16_t*) ((uintptr_t) o1 + output_stride);
int16_t* o3 = (int16_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
*o3++ = i0[3];
*o3++ = i1[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o0++ = i0[0];
*o0++ = i1[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
}
if (bh & 1) {
o3[0] = i0[3];
o2[0] = i0[2];
o1[0] = i0[1];
o0[0] = i0[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
o2 = (int16_t*) ((uintptr_t) o2 + output_reset);
o3 = (int16_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 2,577 | avg_line_length: 30.060241 | max_line_length: 109 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x1-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x1_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 4;
const size_t tile_width = 1;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
const int16_t* i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
const int16_t* i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
int16_t* o0 = (int16_t*) output;
do {
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int16_t*) ((uintptr_t) i3 + input_offset);
}
const int16_t* i = i0;
if (bh & 2) {
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o0[0] = i[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 2,397 | avg_line_length: 31.405405 | max_line_length: 109 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x2-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x2_scalar_int(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 4;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
const int16_t* i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
const int16_t* i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int16_t*) ((uintptr_t) i3 + input_offset);
}
const int16_t* i = i0;
if (bh & 2) {
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 2,737 | avg_line_length: 30.471264 | max_line_length: 109 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-multi-dec-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_multi_dec_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
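// The output pointer walks backwards: o first jumps to the last valid
// output row of the tile (min(block_width - 1, 3) rows down), then each
// store steps back one output_stride, with the conditional updates
// clamping the walk when block_width < 4.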
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x4_t v2_1 = vld1_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x4_t v2_2 = vld1_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x4_t v2_3 = vld1_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
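// Two vzip rounds transpose the 4x4 tile: v0_0.val[0]/val[1] are transposed
// rows 0 and 1, v0_1.val[0]/val[1] rows 2 and 3; they are stored farthest
// row first to match the backward pointer walk.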
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1_u16(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| file_length: 5,715 | avg_line_length: 37.621622 | max_line_length: 124 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-multi-mov-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_multi_mov_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x4_t v2_1 = vld1_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x4_t v2_2 = vld1_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x4_t v2_3 = vld1_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1_u16(o, v0_1.val[1]);
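      // 'mov' variant: the decremented pointer is computed unconditionally
      // into oN and committed only when another output column exists.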
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u16(o, v0_1.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u16(o, v0_0.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v3_low, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16(o, v2_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16(o, v1_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,892 | 36.535032 | 124 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-multi-multi-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_multi_multi_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
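    // With fewer than 4 output rows, the upper pointers collapse onto o0;
    // their redundant stores are overwritten because o0 is written last.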
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x4_t v2_1 = vld1_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x4_t v2_2 = vld1_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x4_t v2_3 = vld1_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
vst1_u16(o3, v0_1.val[1]); o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
vst1_u16(o2, v0_1.val[0]); o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
vst1_u16(o1, v0_0.val[1]); o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
vst1_u16(o0, v0_0.val[0]); o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
vst1_lane_u32((void*) o3, vreinterpret_u32_u16(v3_low), 0); o3 += 2;
vst1_lane_u32((void*) o2, vreinterpret_u32_u16(v2_low), 0); o2 += 2;
vst1_lane_u32((void*) o1, vreinterpret_u32_u16(v1_low), 0); o1 += 2;
vst1_lane_u32((void*) o0, vreinterpret_u32_u16(v0_low), 0); o0 += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
vst1_lane_u16(o3, v3_low, 0);
vst1_lane_u16(o2, v2_low, 0);
vst1_lane_u16(o1, v1_low, 0);
vst1_lane_u16(o0, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 4,956 | 37.726563 | 110 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-multi-switch-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_multi_switch_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x4_t v2_1 = vld1_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x4_t v2_2 = vld1_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x4_t v2_3 = vld1_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
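        // Cases fall through deliberately: rem indexes the highest valid
        // output column, and each case stores one column before dropping
        // into the next.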
case 3:
vst1_u16(oN, v0_1.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u16(oN, v0_1.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u16(oN, v0_0.val[1]);
case 0:
vst1_u16(o, v0_0.val[0]); o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v3_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v2_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_lane_u16(oN, v3_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16(oN, v2_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16(oN, v1_low, 0);
case 0:
vst1_lane_u16(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,456 | 36.376712 | 128 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-reuse-dec-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_reuse_dec_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
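      // 'reuse' variant: a single input pointer walks all four rows in turn
      // instead of maintaining four separate row pointers.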
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_1 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_2 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_3 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1_u16(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,411 | 36.846154 | 124 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-reuse-mov-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_reuse_mov_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_1 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_2 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_3 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1_u16(o, v0_1.val[1]);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u16(o, v0_1.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u16(o, v0_0.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
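      // Derive the remaining row pointers from i0 on the fly, clamping each
      // one back when fewer rows than its index remain.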
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v3_low, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16(o, v2_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16(o, v1_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,588 | 35.769737 | 124 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-reuse-multi-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_reuse_multi_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_1 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_2 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_3 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
vst1_u16(o3, v0_1.val[1]); o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
vst1_u16(o2, v0_1.val[0]); o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
vst1_u16(o1, v0_0.val[1]); o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
vst1_u16(o0, v0_0.val[0]); o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
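      // Store the last 1-3 rows: two lanes at a time, then one, rotating
      // each vector by two lanes between the steps.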
if (bh & 2) {
vst1_lane_u32((void*) o3, vreinterpret_u32_u16(v3_low), 0); o3 += 2;
vst1_lane_u32((void*) o2, vreinterpret_u32_u16(v2_low), 0); o2 += 2;
vst1_lane_u32((void*) o1, vreinterpret_u32_u16(v1_low), 0); o1 += 2;
vst1_lane_u32((void*) o0, vreinterpret_u32_u16(v0_low), 0); o0 += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
vst1_lane_u16(o3, v3_low, 0);
vst1_lane_u16(o2, v2_low, 0);
vst1_lane_u16(o1, v1_low, 0);
vst1_lane_u16(o0, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 4,652 | 36.829268 | 110 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-reuse-switch-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_reuse_switch_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 3);
const size_t oN_stride = rem * output_stride;
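    // rem is the index of the last valid output column for this pass, so
    // oN_stride jumps directly to that column's row in the output.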
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const uint16x4_t v2_0 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_1 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_2 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4_t v2_3 = vld1_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_u16(oN, v0_1.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u16(oN, v0_1.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u16(oN, v0_0.val[1]);
case 0:
vst1_u16(o, v0_0.val[0]); o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint16x4_t v2_0 = vld1_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x4_t v2_1 = vld1_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x4_t v2_2 = vld1_u16(i2);
const uint16x4_t v2_3 = vmov_n_u16(0);
const uint16x4x2_t v1_0 = vzip_u16(v2_0, v2_2);
const uint16x4x2_t v1_1 = vzip_u16(v2_1, v2_3);
const uint16x4x2_t v0_0 = vzip_u16(v1_0.val[0], v1_1.val[0]);
const uint16x4x2_t v0_1 = vzip_u16(v1_0.val[1], v1_1.val[1]);
uint16x4_t v0_low = v0_0.val[0];
uint16x4_t v1_low = v0_0.val[1];
uint16x4_t v2_low = v0_1.val[0];
uint16x4_t v3_low = v0_1.val[1];
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v3_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v2_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 3:
vst1_lane_u16(oN, v3_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16(oN, v2_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16(oN, v1_low, 0);
case 0:
vst1_lane_u16(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 5,152 | 35.546099 | 128 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-4x4-scalar-int.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__4x4_scalar_int(
    const uint16_t* input,
    uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(int16_t));
assert(input_stride >= block_width * sizeof(int16_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(int16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(int16_t);
const size_t input_offset = tile_height * input_stride;
const int16_t* i0 = (const int16_t*) input;
const int16_t* i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
const int16_t* i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
const int16_t* i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
int16_t* o0 = (int16_t*) output;
int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride);
int16_t* o2 = (int16_t*) ((uintptr_t) o1 + output_stride);
int16_t* o3 = (int16_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
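      // Scalar 4x4 transpose: input element [r][c] of the tile is written
      // to output row c, column r.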
*o3++ = i0[3];
*o3++ = i1[3];
*o3++ = i2[3];
*o3++ = i3[3];
*o2++ = i0[2];
*o2++ = i1[2];
*o2++ = i2[2];
*o2++ = i3[2];
*o1++ = i0[1];
*o1++ = i1[1];
*o1++ = i2[1];
*o1++ = i3[1];
*o0++ = i0[0];
*o0++ = i1[0];
*o0++ = i2[0];
*o0++ = i3[0];
i0 = (const int16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const int16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const int16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const int16_t*) ((uintptr_t) i3 + input_offset);
}
const int16_t* i = i0;
if (bh & 2) {
o3[0] = i0[3];
o3[1] = i1[3];
o3 += 2;
o2[0] = i0[2];
o2[1] = i1[2];
o2 += 2;
o1[0] = i0[1];
o1[1] = i1[1];
o1 += 2;
o0[0] = i0[0];
o0[1] = i1[0];
o0 += 2;
i = i2;
}
if (bh & 1) {
o3[0] = i[3];
o2[0] = i[2];
o1[0] = i[1];
o0[0] = i[0];
}
i0 = (const int16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const int16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const int16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const int16_t*) ((uintptr_t) i2 + input_stride);
o0 = (int16_t*) ((uintptr_t) o0 + output_reset);
o1 = (int16_t*) ((uintptr_t) o1 + output_reset);
o2 = (int16_t*) ((uintptr_t) o2 + output_reset);
o3 = (int16_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
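
A minimal standalone driver for the scalar kernel above, shown as a hedged sketch: the buffer shapes, the <xnnpack/transpose.h> include, and the NULL params argument are illustrative assumptions, not part of the generated sources. Strides are given in bytes, and this kernel never dereferences params.

// Hypothetical usage sketch (assumes an XNNPACK build environment).
#include <stdint.h>
#include <stdio.h>
#include <xnnpack/transpose.h>

int main(void) {
  uint16_t in[4][8];   // 4 rows x 8 columns, row-major
  uint16_t out[8][4];  // transposed result: 8 rows x 4 columns
  for (int r = 0; r < 4; r++) {
    for (int c = 0; c < 8; c++) {
      in[r][c] = (uint16_t) (r * 8 + c);
    }
  }
  xnn_x16_transposec_ukernel__4x4_scalar_int(
      &in[0][0], &out[0][0],
      /*input_stride=*/8 * sizeof(uint16_t),
      /*output_stride=*/4 * sizeof(uint16_t),
      /*block_width=*/8, /*block_height=*/4, /*params=*/NULL);
  printf("out[5][2] = %u\n", (unsigned) out[5][2]);  // expect in[2][5] == 21
  return 0;
}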
| 3,418 | 29.256637 | 109 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-dec-zip-neon.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_multi_dec_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x8_t v3_1 = vld1q_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x8_t v3_2 = vld1q_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x8_t v3_3 = vld1q_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x8_t v3_4 = vld1q_u16(i4); i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const uint16x8_t v3_5 = vld1q_u16(i5); i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const uint16x8_t v3_6 = vld1q_u16(i6); i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const uint16x8_t v3_7 = vld1q_u16(i7); i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
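      // Three zip stages (row distance 4, then 2, then 1) transpose the
      // 8x8 tile; v0_*.val[*] hold the eight output columns.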
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1q_u16(o, v0_3.val[1]);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_3.val[0]);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_2.val[1]);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_2.val[0]);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
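      // bh is 1-7 here: emit 4 rows of each output column, then 2, then 1,
      // moving from the low to the high vector halves after the 4-row step.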
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_u16(o, v7_low);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v6_low);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v5_low);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v4_low);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v3_low);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v2_low);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v1_low);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_low); o += 4;
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v7_low, 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v6_low, 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v5_low, 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v4_low, 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 12,133 | 40.986159 | 124 | c |
XNNPACK | XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-mov-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__8x8_multi_mov_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i7);
i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
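      // Three unpack stages (16-, 32-, then 64-bit interleaves) transpose
      // the 8x8 tile into v0_0..v0_7, one vector per output column.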
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
_mm_storeu_si128((__m128i*) o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_0);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
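      // Row 7 is absent (bh < 8); an undefined register is safe because
      // the lanes derived from it are never stored in the tail paths.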
const __m128i v3_7 = _mm_undefined_si128();
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
__m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
__m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
__m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
__m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
_mm_storel_epi64((__m128i*) o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_0);
o += 4;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_7));
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_1));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
o += 2;
v0_0 = _mm_srli_epi64(v0_0, 32);
v0_1 = _mm_srli_epi64(v0_1, 32);
v0_2 = _mm_srli_epi64(v0_2, 32);
v0_3 = _mm_srli_epi64(v0_3, 32);
v0_4 = _mm_srli_epi64(v0_4, 32);
v0_5 = _mm_srli_epi64(v0_5, 32);
v0_6 = _mm_srli_epi64(v0_6, 32);
v0_7 = _mm_srli_epi64(v0_7, 32);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_7));
uint16_t* oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_1));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
file_length: 14302 | avg_line_length: 40.21902 | max_line_length: 124 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-mov-wasmsimd.c
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_multi_mov_wasmsimd(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const v128_t v3_0 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const v128_t v3_1 = wasm_v128_load(i1);
i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const v128_t v3_2 = wasm_v128_load(i2);
i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const v128_t v3_3 = wasm_v128_load(i3);
i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const v128_t v3_4 = wasm_v128_load(i4);
i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const v128_t v3_5 = wasm_v128_load(i5);
i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const v128_t v3_6 = wasm_v128_load(i6);
i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const v128_t v3_7 = wasm_v128_load(i7);
i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
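      // Transpose the 8x8 tile in registers: three rounds of 16-bit
      // interleave shuffles turn the row vectors v3_0..v3_7 into the
      // transposed row vectors v0_0..v0_7.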
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
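      // Store output rows 7..0 back-to-front; when block_width < 8 the
      // guards keep o pinned, so stores beyond the valid rows land on a
      // valid row and are overwritten with the correct data afterwards.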
o = (uint16_t*) ((uintptr_t) o + oN_offset);
wasm_v128_store(o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store(o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store(o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store(o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store(o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store(o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store(o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store(o, v0_0);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
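    // Fewer than 8 rows remain: pointers for the missing rows alias i0
    // (guarded by the bh tests) so the vector loads stay in bounds, and only
    // bh elements per output row are stored below.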
if (bh != 0) {
const v128_t v3_0 = wasm_v128_load(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v3_1 = wasm_v128_load(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const v128_t v3_2 = wasm_v128_load(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const v128_t v3_3 = wasm_v128_load(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const v128_t v3_4 = wasm_v128_load(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const v128_t v3_5 = wasm_v128_load(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const v128_t v3_6 = wasm_v128_load(i6);
const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store64_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store64_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store64_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store64_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store64_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store64_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store64_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store64_lane(o, v0_0, 0);
o += 4;
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store32_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store32_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store32_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store32_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store32_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store32_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store32_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store32_lane(o, v0_0, 0);
o += 2;
v0_0 = wasm_u64x2_shr(v0_0, 32);
v0_1 = wasm_u64x2_shr(v0_1, 32);
v0_2 = wasm_u64x2_shr(v0_2, 32);
v0_3 = wasm_u64x2_shr(v0_3, 32);
v0_4 = wasm_u64x2_shr(v0_4, 32);
v0_5 = wasm_u64x2_shr(v0_5, 32);
v0_6 = wasm_u64x2_shr(v0_6, 32);
v0_7 = wasm_u64x2_shr(v0_7, 32);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store16_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store16_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store16_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store16_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store16_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store16_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store16_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store16_lane(o, v0_0, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
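// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources above): a minimal,
// hypothetical driver for the 8x8 wasmsimd transpose kernel. Assumptions:
// strides are given in bytes; the params argument is never dereferenced by
// this kernel, so a zero-initialized instance suffices; the 8x8 shape is an
// exact tile, so the XNN_OOB_READS-tagged vector loads stay inside the
// buffers; the prototype is assumed visible via <xnnpack/transpose.h>; the
// program is built with a wasm SIMD toolchain.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <xnnpack/transpose.h>

int main(void) {
  enum { ROWS = 8, COLS = 8 };
  static uint16_t in[ROWS][COLS];
  static uint16_t out[COLS][ROWS];  // transposed shape
  for (size_t i = 0; i < ROWS; i++) {
    for (size_t j = 0; j < COLS; j++) {
      in[i][j] = (uint16_t) (i * COLS + j);
    }
  }
  static const union xnn_x16_transpose_params params;  // zero-initialized, unused
  xnn_x16_transposec_ukernel__8x8_multi_mov_wasmsimd(
      &in[0][0], &out[0][0],
      COLS * sizeof(uint16_t),   // input_stride in bytes
      ROWS * sizeof(uint16_t),   // output_stride in bytes
      COLS,                      // block_width: input columns
      ROWS,                      // block_height: input rows
      &params);
  // After the call, out[j][i] == in[i][j] for all i, j.
  printf("out[3][2]=%u in[2][3]=%u\n", out[3][2], in[2][3]);
  return 0;
}
// ---------------------------------------------------------------------------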
file_length: 14795 | avg_line_length: 42.390029 | max_line_length: 124 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-mov-zip-neon.c
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_multi_mov_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x8_t v3_1 = vld1q_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x8_t v3_2 = vld1q_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x8_t v3_3 = vld1q_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x8_t v3_4 = vld1q_u16(i4); i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const uint16x8_t v3_5 = vld1q_u16(i5); i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const uint16x8_t v3_6 = vld1q_u16(i6); i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const uint16x8_t v3_7 = vld1q_u16(i7); i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
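      // vzipq ladder: three rounds of 16-bit zips turn the row vectors v3_*
      // into the transposed tile, two output rows per uint16x8x2_t in
      // v0_0..v0_3 (.val[0] and .val[1]).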
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1q_u16(o, v0_3.val[1]);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1q_u16(o, v0_3.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1q_u16(o, v0_2.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1q_u16(o, v0_2.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1q_u16(o, v0_1.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1q_u16(o, v0_1.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1q_u16(o, v0_0.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1q_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
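      // Split off the low halves first; the bh & 4 block stores them and
      // then swaps in the high halves for the remaining elements.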
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_u16(o, v7_low);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_u16(o, v6_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_u16(o, v5_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_u16(o, v4_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_u16(o, v3_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u16(o, v2_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u16(o, v1_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u16(o, v0_low); o += 4;
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v7_low), 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v6_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v5_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v4_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v7_low, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u16(o, v6_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u16(o, v5_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u16(o, v4_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u16(o, v3_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16(o, v2_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16(o, v1_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
file_length: 12635 | avg_line_length: 38.861199 | max_line_length: 124 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-switch-sse2.c
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__8x8_multi_switch_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i7);
i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
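      // The switch cases deliberately fall through: entry at case `rem`
      // stores output row rem first, then each case steps oN back one output
      // row, so exactly block_width rows are written per tile.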
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
_mm_storeu_si128((__m128i*) oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
_mm_storeu_si128((__m128i*) oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
_mm_storeu_si128((__m128i*) oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
_mm_storeu_si128((__m128i*) oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
_mm_storeu_si128((__m128i*) oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storeu_si128((__m128i*) oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storeu_si128((__m128i*) oN, v0_1);
case 0:
_mm_storeu_si128((__m128i*) o, v0_0);
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
const __m128i v3_7 = _mm_undefined_si128();
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
__m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
__m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
__m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
__m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
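      // The leftover rows reuse the same fallthrough switch, narrowed to
      // 64-bit, then 32-bit, then 16-bit stores per output row.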
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
_mm_storel_epi64((__m128i*) oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
_mm_storel_epi64((__m128i*) oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
_mm_storel_epi64((__m128i*) oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
_mm_storel_epi64((__m128i*) oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
_mm_storel_epi64((__m128i*) oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storel_epi64((__m128i*) oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storel_epi64((__m128i*) oN, v0_1);
case 0:
_mm_storel_epi64((__m128i*) o, v0_0);
break;
default:
XNN_UNREACHABLE;
}
o += 4;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_7));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_1));
case 0:
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
break;
default:
XNN_UNREACHABLE;
}
o += 2;
v0_0 = _mm_srli_epi64(v0_0, 32);
v0_1 = _mm_srli_epi64(v0_1, 32);
v0_2 = _mm_srli_epi64(v0_2, 32);
v0_3 = _mm_srli_epi64(v0_3, 32);
v0_4 = _mm_srli_epi64(v0_4, 32);
v0_5 = _mm_srli_epi64(v0_5, 32);
v0_6 = _mm_srli_epi64(v0_6, 32);
v0_7 = _mm_srli_epi64(v0_7, 32);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_7));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_1));
case 0:
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
file_length: 13093 | avg_line_length: 41.23871 | max_line_length: 110 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-switch-wasmsimd.c
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_multi_switch_wasmsimd(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const v128_t v3_0 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const v128_t v3_1 = wasm_v128_load(i1);
i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const v128_t v3_2 = wasm_v128_load(i2);
i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const v128_t v3_3 = wasm_v128_load(i3);
i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const v128_t v3_4 = wasm_v128_load(i4);
i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const v128_t v3_5 = wasm_v128_load(i5);
i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const v128_t v3_6 = wasm_v128_load(i6);
i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const v128_t v3_7 = wasm_v128_load(i7);
i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
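      // Fallthrough switch as in the sse2 variant: output rows rem..0 are
      // stored back-to-front, one full 128-bit vector each.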
uint16_t *oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store(oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store(oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store(oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store(oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store(oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store(oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store(oN, v0_1);
case 0:
wasm_v128_store(o, v0_0);
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const v128_t v3_0 = wasm_v128_load(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v3_1 = wasm_v128_load(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const v128_t v3_2 = wasm_v128_load(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const v128_t v3_3 = wasm_v128_load(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const v128_t v3_4 = wasm_v128_load(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const v128_t v3_5 = wasm_v128_load(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const v128_t v3_6 = wasm_v128_load(i6);
const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store64_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store64_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store64_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store64_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store64_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store64_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store64_lane(oN, v0_1, 0);
case 0:
wasm_v128_store64_lane(o, v0_0, 0);
o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store32_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store32_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store32_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store32_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store32_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store32_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store32_lane(oN, v0_1, 0);
case 0:
wasm_v128_store32_lane(o, v0_0, 0);
o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_u64x2_shr(v0_0, 32);
v0_1 = wasm_u64x2_shr(v0_1, 32);
v0_2 = wasm_u64x2_shr(v0_2, 32);
v0_3 = wasm_u64x2_shr(v0_3, 32);
v0_4 = wasm_u64x2_shr(v0_4, 32);
v0_5 = wasm_u64x2_shr(v0_5, 32);
v0_6 = wasm_u64x2_shr(v0_6, 32);
v0_7 = wasm_u64x2_shr(v0_7, 32);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store16_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store16_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store16_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store16_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store16_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store16_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store16_lane(oN, v0_1, 0);
case 0:
wasm_v128_store16_lane(o, v0_0, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
file_length: 13594 | avg_line_length: 43.720395 | max_line_length: 110 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-multi-switch-zip-neon.c
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_multi_switch_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t input_offset = tile_height * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
const uint16x8_t v3_1 = vld1q_u16(i1); i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
const uint16x8_t v3_2 = vld1q_u16(i2); i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
const uint16x8_t v3_3 = vld1q_u16(i3); i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
const uint16x8_t v3_4 = vld1q_u16(i4); i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
const uint16x8_t v3_5 = vld1q_u16(i5); i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
const uint16x8_t v3_6 = vld1q_u16(i6); i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
const uint16x8_t v3_7 = vld1q_u16(i7); i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
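      // Same fallthrough store scheme, driven by the vzipq results: entry at
      // case `rem` walks oN back one output row per case.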
uint16_t *oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1q_u16(oN, v0_3.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1q_u16(oN, v0_3.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1q_u16(oN, v0_2.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1q_u16(oN, v0_2.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1q_u16(oN, v0_1.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1q_u16(oN, v0_1.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1q_u16(oN, v0_0.val[1]);
case 0:
vst1q_u16(o, v0_0.val[0]); o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i0;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i0;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i0;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i0;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_u16(oN, v7_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_u16(oN, v6_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_u16(oN, v5_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_u16(oN, v4_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_u16(oN, v3_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u16(oN, v2_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u16(oN, v1_low);
case 0:
vst1_u16(o, v0_low); o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v7_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v6_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v5_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v4_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v3_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v2_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u16(oN, v7_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u16(oN, v6_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u16(oN, v5_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u16(oN, v4_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u16(oN, v3_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16(oN, v2_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16(oN, v1_low, 0);
case 0:
vst1_lane_u16(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
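    // Rebase all eight row pointers for the next strip of columns: i0 jumps
    // by input_reset, and i1..i7 are re-derived from it one input_stride apart.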
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
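(End of the preceding multi-pointer transpose variant; only its tail is shown above.)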
| 11140
| 42.690196
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-dec-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
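  // input_reset moves i0 from the bottom of a finished strip of columns to the
  // top of the next one; output_reset performs the matching rewind for o, whose
  // tail handling advances it in 2-element steps (hence the round_down_po2 term).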
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_1 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_2 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_3 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_4 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_5 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_6 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_7 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
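      // 8x8 transpose as a three-stage butterfly: zip rows 4 apart, then the
      // results 2 apart, then 1 apart; v0_*.val[*] hold the transposed columns.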
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1q_u16(o, v0_3.val[1]);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_3.val[0]);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_2.val[1]);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_2.val[0]);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_1.val[1]);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_1.val[0]);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_0.val[1]);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1q_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
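      // Zero-fill the absent row 7: at most 7 rows remain in this tail.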
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_u16(o, v7_low);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v6_low);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v5_low);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v4_low);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v3_low);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v2_low);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v1_low);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_u16(o, v0_low); o += 4;
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v7_low), 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v6_low), 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v5_low), 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v4_low), 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v7_low, 0);
if XNN_UNPREDICTABLE(block_width > 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v6_low, 0);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v5_low, 0);
if XNN_UNPREDICTABLE(block_width > 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v4_low, 0);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v3_low, 0);
if XNN_UNPREDICTABLE(block_width > 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v2_low, 0);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v1_low, 0);
if XNN_UNPREDICTABLE(block_width > 1) {
o = (uint16_t*) ((uintptr_t) o + minus_output_stride);
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
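A minimal caller sketch for the kernel above, not part of the generated file: the wrapper name and the 10x13 shape are illustrative, and production code typically reaches this ukernel through XNNPACK's transpose operator rather than calling it directly.

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/transpose.h>  // declares the ukernel and the params union

// Transpose a 10x13 row-major matrix of uint16_t into a 13x10 one.
// Strides are in bytes, matching the asserts at the top of the kernel.
// Caution: the kernel is annotated XNN_OOB_READS, so in real use `src`
// should be padded out to whole 8x8 tiles.
void transpose_10x13(const uint16_t* src, uint16_t* dst) {
  static const union xnn_x16_transpose_params params;  // never read by this kernel
  xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon(
      src, dst,
      /*input_stride=*/13 * sizeof(uint16_t),
      /*output_stride=*/10 * sizeof(uint16_t),
      /*block_width=*/13,
      /*block_height=*/10,
      &params);
}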
| 11605
| 40.45
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-mov-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__8x8_reuse_mov_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
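      // Same transpose, SSE2 style: 16-bit unpacks pair adjacent rows, 32-bit
      // unpacks pair the pairs, and 64-bit unpacks assemble the final columns.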
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
_mm_storeu_si128((__m128i*) o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storeu_si128((__m128i*) o, v0_0);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
const __m128i v3_7 = _mm_undefined_si128();
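      // v3_7 may hold garbage (_mm_undefined_si128): bh < 8 here, so its lanes
      // only reach element 7 of each output column, which is never stored.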
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
__m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
__m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
__m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
__m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
_mm_storel_epi64((__m128i*) o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
_mm_storel_epi64((__m128i*) o, v0_0);
o += 4;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_7));
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_1));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
o += 2;
v0_0 = _mm_srli_epi64(v0_0, 32);
v0_1 = _mm_srli_epi64(v0_1, 32);
v0_2 = _mm_srli_epi64(v0_2, 32);
v0_3 = _mm_srli_epi64(v0_3, 32);
v0_4 = _mm_srli_epi64(v0_4, 32);
v0_5 = _mm_srli_epi64(v0_5, 32);
v0_6 = _mm_srli_epi64(v0_6, 32);
v0_7 = _mm_srli_epi64(v0_7, 32);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_7));
uint16_t* oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_1));
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 13774
| 39.754438
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-mov-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_mov_wasmsimd(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const v128_t v3_0 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_1 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_2 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_3 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_4 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_5 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_6 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_7 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
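      // wasm_v16x8_shuffle with the 0,8,1,9,... and 4,12,5,13,... masks emulates
      // the low/high halves of a NEON-style zip; three rounds complete the transpose.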
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
o = (uint16_t*) ((uintptr_t) o + oN_offset);
wasm_v128_store(o, v0_7);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store(o, v0_6);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store(o, v0_5);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store(o, v0_4);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store(o, v0_3);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store(o, v0_2);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store(o, v0_1);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store(o, v0_0);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const v128_t v3_0 = wasm_v128_load(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v3_1 = wasm_v128_load(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const v128_t v3_2 = wasm_v128_load(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const v128_t v3_3 = wasm_v128_load(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const v128_t v3_4 = wasm_v128_load(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const v128_t v3_5 = wasm_v128_load(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const v128_t v3_6 = wasm_v128_load(i6);
const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);
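      // Zero vector for the absent row 7, synthesized by XORing a register with itself.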
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store64_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store64_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store64_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store64_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store64_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store64_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store64_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store64_lane(o, v0_0, 0);
o += 4;
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store32_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store32_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store32_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store32_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store32_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store32_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store32_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store32_lane(o, v0_0, 0);
o += 2;
v0_0 = wasm_u64x2_shr(v0_0, 32);
v0_1 = wasm_u64x2_shr(v0_1, 32);
v0_2 = wasm_u64x2_shr(v0_2, 32);
v0_3 = wasm_u64x2_shr(v0_3, 32);
v0_4 = wasm_u64x2_shr(v0_4, 32);
v0_5 = wasm_u64x2_shr(v0_5, 32);
v0_6 = wasm_u64x2_shr(v0_6, 32);
v0_7 = wasm_u64x2_shr(v0_7, 32);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
wasm_v128_store16_lane(o, v0_7, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
wasm_v128_store16_lane(o, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
wasm_v128_store16_lane(o, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
wasm_v128_store16_lane(o, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
wasm_v128_store16_lane(o, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
wasm_v128_store16_lane(o, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
wasm_v128_store16_lane(o, v0_1, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
wasm_v128_store16_lane(o, v0_0, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 14267
| 41.975904
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-mov-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_mov_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
const size_t oN_offset = oN_stride + tile_hbytes;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_1 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_2 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_3 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_4 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_5 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_6 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_7 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
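      // Store transposed columns from last to first: jump o to the highest
      // surviving column, then step back one output row per store.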
o = (uint16_t*) ((uintptr_t) o + oN_offset);
vst1q_u16(o, v0_3.val[1]);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1q_u16(o, v0_3.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1q_u16(o, v0_2.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1q_u16(o, v0_2.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1q_u16(o, v0_1.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1q_u16(o, v0_1.val[0]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1q_u16(o, v0_0.val[1]);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1q_u16(o, v0_0.val[0]);
}
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_u16(o, v7_low);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_u16(o, v6_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_u16(o, v5_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_u16(o, v4_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_u16(o, v3_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_u16(o, v2_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_u16(o, v1_low);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_u16(o, v0_low); o += 4;
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v7_low), 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v6_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v5_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v4_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v3_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v2_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v1_low), 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
o = (uint16_t*) ((uintptr_t) o + oN_stride);
vst1_lane_u16(o, v7_low, 0);
uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 7) {
o = oN;
}
vst1_lane_u16(o, v6_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 7) {
o = oN;
}
vst1_lane_u16(o, v5_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 5) {
o = oN;
}
vst1_lane_u16(o, v4_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 5) {
o = oN;
}
vst1_lane_u16(o, v3_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 3) {
o = oN;
}
vst1_lane_u16(o, v2_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width >= 3) {
o = oN;
}
vst1_lane_u16(o, v1_low, 0);
oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
if XNN_UNPREDICTABLE(block_width > 1) {
o = oN;
}
vst1_lane_u16(o, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 12107
| 38.311688
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-multi-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__8x8_reuse_multi_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 4) {
o4 = o0;
}
if XNN_UNPREDICTABLE(block_width < 6) {
o5 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 6) {
o6 = o0;
}
if XNN_UNPREDICTABLE(block_width < 8) {
o7 = o0;
}
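    // Output columns beyond block_width are aliased onto o0: their stores
    // become harmless duplicates that the final store to o0 (v0_0) overwrites.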
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
_mm_storeu_si128((__m128i*) o7, v0_7);
o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
_mm_storeu_si128((__m128i*) o6, v0_6);
o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
_mm_storeu_si128((__m128i*) o5, v0_5);
o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
_mm_storeu_si128((__m128i*) o4, v0_4);
o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
_mm_storeu_si128((__m128i*) o3, v0_3);
o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
_mm_storeu_si128((__m128i*) o2, v0_2);
o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
_mm_storeu_si128((__m128i*) o1, v0_1);
o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
_mm_storeu_si128((__m128i*) o0, v0_0);
o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
const __m128i v3_7 = _mm_undefined_si128();
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
__m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
__m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
__m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
__m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
if (bh & 4) {
_mm_storel_epi64((__m128i*) o7, v0_7);
o7 += 4;
_mm_storel_epi64((__m128i*) o6, v0_6);
o6 += 4;
_mm_storel_epi64((__m128i*) o5, v0_5);
o5 += 4;
_mm_storel_epi64((__m128i*) o4, v0_4);
o4 += 4;
_mm_storel_epi64((__m128i*) o3, v0_3);
o3 += 4;
_mm_storel_epi64((__m128i*) o2, v0_2);
o2 += 4;
_mm_storel_epi64((__m128i*) o1, v0_1);
o1 += 4;
_mm_storel_epi64((__m128i*) o0, v0_0);
o0 += 4;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
}
if (bh & 2) {
unaligned_store_u32(o7, (uint32_t) _mm_cvtsi128_si32(v0_7));
o7 += 2;
unaligned_store_u32(o6, (uint32_t) _mm_cvtsi128_si32(v0_6));
o6 += 2;
unaligned_store_u32(o5, (uint32_t) _mm_cvtsi128_si32(v0_5));
o5 += 2;
unaligned_store_u32(o4, (uint32_t) _mm_cvtsi128_si32(v0_4));
o4 += 2;
unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v0_3));
o3 += 2;
unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v0_2));
o2 += 2;
unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v0_1));
o1 += 2;
unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v0_0));
o0 += 2;
v0_0 = _mm_srli_epi64(v0_0, 32);
v0_1 = _mm_srli_epi64(v0_1, 32);
v0_2 = _mm_srli_epi64(v0_2, 32);
v0_3 = _mm_srli_epi64(v0_3, 32);
v0_4 = _mm_srli_epi64(v0_4, 32);
v0_5 = _mm_srli_epi64(v0_5, 32);
v0_6 = _mm_srli_epi64(v0_6, 32);
v0_7 = _mm_srli_epi64(v0_7, 32);
}
if (bh & 1) {
unaligned_store_u16(o7, (uint16_t) _mm_cvtsi128_si32(v0_7));
unaligned_store_u16(o6, (uint16_t) _mm_cvtsi128_si32(v0_6));
unaligned_store_u16(o5, (uint16_t) _mm_cvtsi128_si32(v0_5));
unaligned_store_u16(o4, (uint16_t) _mm_cvtsi128_si32(v0_4));
unaligned_store_u16(o3, (uint16_t) _mm_cvtsi128_si32(v0_3));
unaligned_store_u16(o2, (uint16_t) _mm_cvtsi128_si32(v0_2));
unaligned_store_u16(o1, (uint16_t) _mm_cvtsi128_si32(v0_1));
unaligned_store_u16(o0, (uint16_t) _mm_cvtsi128_si32(v0_0));
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
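For sanity-checking any of the 8x8 kernels in this directory, a plain scalar reference is enough, since the kernels only move bits and an exact compare applies. This helper is illustrative only and not part of XNNPACK; note it takes strides in elements, unlike the byte strides used above.

#include <stddef.h>
#include <stdint.h>

// Reference transpose: dst[x][y] = src[y][x] for a height x width input.
static void transpose_ref(const uint16_t* src, uint16_t* dst,
                          size_t src_stride, size_t dst_stride,
                          size_t width, size_t height) {
  for (size_t y = 0; y < height; y++) {
    for (size_t x = 0; x < width; x++) {
      dst[x * dst_stride + y] = src[y * src_stride + x];
    }
  }
}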
| 11344
| 40.405109
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-multi-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_multi_wasmsimd(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 4) {
o4 = o0;
}
if XNN_UNPREDICTABLE(block_width < 6) {
o5 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 6) {
o6 = o0;
}
if XNN_UNPREDICTABLE(block_width < 8) {
o7 = o0;
}
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const v128_t v3_0 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_1 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_2 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_3 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_4 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_5 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_6 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_7 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
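      // 8x8 transpose as three butterfly stages of 16-bit interleaves:
      // stage 1 interleaves rows 4 apart (v3_r with v3_{r+4}), stage 2 the
      // stage-1 results, and stage 3 yields the fully transposed rows v0_*.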
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
wasm_v128_store(o7, v0_7);
o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
wasm_v128_store(o6, v0_6);
o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
wasm_v128_store(o5, v0_5);
o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
wasm_v128_store(o4, v0_4);
o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
wasm_v128_store(o3, v0_3);
o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
wasm_v128_store(o2, v0_2);
o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
wasm_v128_store(o1, v0_1);
o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
wasm_v128_store(o0, v0_0);
o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const v128_t v3_0 = wasm_v128_load(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v3_1 = wasm_v128_load(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const v128_t v3_2 = wasm_v128_load(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const v128_t v3_3 = wasm_v128_load(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const v128_t v3_4 = wasm_v128_load(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const v128_t v3_5 = wasm_v128_load(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const v128_t v3_6 = wasm_v128_load(i6);
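      // At most 7 rows remain (bh < 8), so the 8th input row is synthesized
      // as zero instead of being loaded.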
const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
if (bh & 4) {
wasm_v128_store64_lane(o7, v0_7, 0);
o7 += 4;
wasm_v128_store64_lane(o6, v0_6, 0);
o6 += 4;
wasm_v128_store64_lane(o5, v0_5, 0);
o5 += 4;
wasm_v128_store64_lane(o4, v0_4, 0);
o4 += 4;
wasm_v128_store64_lane(o3, v0_3, 0);
o3 += 4;
wasm_v128_store64_lane(o2, v0_2, 0);
o2 += 4;
wasm_v128_store64_lane(o1, v0_1, 0);
o1 += 4;
wasm_v128_store64_lane(o0, v0_0, 0);
o0 += 4;
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
}
if (bh & 2) {
wasm_v128_store32_lane(o7, v0_7, 0);
o7 += 2;
wasm_v128_store32_lane(o6, v0_6, 0);
o6 += 2;
wasm_v128_store32_lane(o5, v0_5, 0);
o5 += 2;
wasm_v128_store32_lane(o4, v0_4, 0);
o4 += 2;
wasm_v128_store32_lane(o3, v0_3, 0);
o3 += 2;
wasm_v128_store32_lane(o2, v0_2, 0);
o2 += 2;
wasm_v128_store32_lane(o1, v0_1, 0);
o1 += 2;
wasm_v128_store32_lane(o0, v0_0, 0);
o0 += 2;
v0_0 = wasm_u64x2_shr(v0_0, 32);
v0_1 = wasm_u64x2_shr(v0_1, 32);
v0_2 = wasm_u64x2_shr(v0_2, 32);
v0_3 = wasm_u64x2_shr(v0_3, 32);
v0_4 = wasm_u64x2_shr(v0_4, 32);
v0_5 = wasm_u64x2_shr(v0_5, 32);
v0_6 = wasm_u64x2_shr(v0_6, 32);
v0_7 = wasm_u64x2_shr(v0_7, 32);
}
if (bh & 1) {
wasm_v128_store16_lane(o7, v0_7, 0);
wasm_v128_store16_lane(o6, v0_6, 0);
wasm_v128_store16_lane(o5, v0_5, 0);
wasm_v128_store16_lane(o4, v0_4, 0);
wasm_v128_store16_lane(o3, v0_3, 0);
wasm_v128_store16_lane(o2, v0_2, 0);
wasm_v128_store16_lane(o1, v0_1, 0);
wasm_v128_store16_lane(o0, v0_0, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
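
// --- Editor's usage sketch (not part of the generated file) ---
// Minimal example of driving the microkernel above on an 11x5 block. Names
// and sizes are illustrative only; this variant never reads its params
// argument, so passing NULL is assumed to be safe. Strides are in bytes, and
// the input rows are padded past the valid width because the kernel is
// annotated XNN_OOB_READS (it loads full 16-byte vectors per row).
void xnn_example_transpose_11x5(void) {
  uint16_t in[5][16];   // 5 rows x 11 valid columns, padded row stride
  uint16_t out[11][6];  // 11 rows x 5 valid columns, padded row stride
  xnn_x16_transposec_ukernel__8x8_reuse_multi_wasmsimd(
      &in[0][0], &out[0][0],
      /*input_stride=*/16 * sizeof(uint16_t),
      /*output_stride=*/6 * sizeof(uint16_t),
      /*block_width=*/11, /*block_height=*/5,
      /*params=*/NULL);
}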
| 11,837
| 43.171642
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-multi-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_multi_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o0 = (uint16_t*) output;
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 4) {
o4 = o0;
}
if XNN_UNPREDICTABLE(block_width < 6) {
o5 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 6) {
o6 = o0;
}
if XNN_UNPREDICTABLE(block_width < 8) {
o7 = o0;
}
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_1 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_2 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_3 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_4 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_5 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_6 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_7 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
vst1q_u16(o7, v0_3.val[1]); o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
vst1q_u16(o6, v0_3.val[0]); o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
vst1q_u16(o5, v0_2.val[1]); o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
vst1q_u16(o4, v0_2.val[0]); o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
vst1q_u16(o3, v0_1.val[1]); o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
vst1q_u16(o2, v0_1.val[0]); o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
vst1q_u16(o1, v0_0.val[1]); o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
vst1q_u16(o0, v0_0.val[0]); o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
}
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
vst1_u16(o7, v7_low); o7 += 4;
vst1_u16(o6, v6_low); o6 += 4;
vst1_u16(o5, v5_low); o5 += 4;
vst1_u16(o4, v4_low); o4 += 4;
vst1_u16(o3, v3_low); o3 += 4;
vst1_u16(o2, v2_low); o2 += 4;
vst1_u16(o1, v1_low); o1 += 4;
vst1_u16(o0, v0_low); o0 += 4;
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
vst1_lane_u32((void*) o7, vreinterpret_u32_u16(v7_low), 0); o7 += 2;
vst1_lane_u32((void*) o6, vreinterpret_u32_u16(v6_low), 0); o6 += 2;
vst1_lane_u32((void*) o5, vreinterpret_u32_u16(v5_low), 0); o5 += 2;
vst1_lane_u32((void*) o4, vreinterpret_u32_u16(v4_low), 0); o4 += 2;
vst1_lane_u32((void*) o3, vreinterpret_u32_u16(v3_low), 0); o3 += 2;
vst1_lane_u32((void*) o2, vreinterpret_u32_u16(v2_low), 0); o2 += 2;
vst1_lane_u32((void*) o1, vreinterpret_u32_u16(v1_low), 0); o1 += 2;
vst1_lane_u32((void*) o0, vreinterpret_u32_u16(v0_low), 0); o0 += 2;
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
vst1_lane_u16(o7, v7_low, 0);
vst1_lane_u16(o6, v6_low, 0);
vst1_lane_u16(o5, v5_low, 0);
vst1_lane_u16(o4, v4_low, 0);
vst1_lane_u16(o3, v3_low, 0);
vst1_lane_u16(o2, v2_low, 0);
vst1_lane_u16(o1, v1_low, 0);
vst1_lane_u16(o0, v0_low, 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 9,517
| 41.873874
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-switch-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <immintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x16_transposec_ukernel__8x8_reuse_switch_sse2(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
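    // rem is the number of output rows beyond the first (0..7) in this
    // column tile; oN starts at the last such row and each fall-through
    // case below stores one row, then steps oN back by adding
    // minus_output_stride (the stride's two's-complement negation).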
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
_mm_storeu_si128((__m128i*) oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
_mm_storeu_si128((__m128i*) oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
_mm_storeu_si128((__m128i*) oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
_mm_storeu_si128((__m128i*) oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
_mm_storeu_si128((__m128i*) oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storeu_si128((__m128i*) oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storeu_si128((__m128i*) oN, v0_1);
case 0:
_mm_storeu_si128((__m128i*) o, v0_0);
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
const __m128i v3_7 = _mm_undefined_si128();
const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);
const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);
__m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
__m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
__m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
__m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
__m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
__m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
__m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
__m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
_mm_storel_epi64((__m128i*) oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
_mm_storel_epi64((__m128i*) oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
_mm_storel_epi64((__m128i*) oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
_mm_storel_epi64((__m128i*) oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
_mm_storel_epi64((__m128i*) oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
_mm_storel_epi64((__m128i*) oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
_mm_storel_epi64((__m128i*) oN, v0_1);
case 0:
_mm_storel_epi64((__m128i*) o, v0_0);
break;
default:
XNN_UNREACHABLE;
}
o += 4;
v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_7));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_1));
case 0:
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
break;
default:
XNN_UNREACHABLE;
}
o += 2;
v0_0 = _mm_srli_epi64(v0_0, 32);
v0_1 = _mm_srli_epi64(v0_1, 32);
v0_2 = _mm_srli_epi64(v0_2, 32);
v0_3 = _mm_srli_epi64(v0_3, 32);
v0_4 = _mm_srli_epi64(v0_4, 32);
v0_5 = _mm_srli_epi64(v0_5, 32);
v0_6 = _mm_srli_epi64(v0_6, 32);
v0_7 = _mm_srli_epi64(v0_7, 32);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_7));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_6));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_5));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_4));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_3));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_2));
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_1));
case 0:
unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
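
// --- Editor's usage sketch (not part of the generated file) ---
// Exercises the rem-based store switch above with a narrow block
// (block_width = 3, so rem = 2 on the only column tile). Illustrative only;
// this variant never reads its params argument, so NULL is assumed to be
// acceptable, and the input rows are padded to keep the full 16-byte loads
// in bounds.
void xnn_example_transpose_3x9_sse2(void) {
  uint16_t in[9][8];    // 9 rows x 3 valid columns, padded row stride
  uint16_t out[3][10];  // 3 rows x 9 valid columns, padded row stride
  xnn_x16_transposec_ukernel__8x8_reuse_switch_sse2(
      &in[0][0], &out[0][0],
      /*input_stride=*/8 * sizeof(uint16_t),
      /*output_stride=*/10 * sizeof(uint16_t),
      /*block_width=*/3, /*block_height=*/9,
      /*params=*/NULL);
}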
| 12,565
| 40.747508
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-switch-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <wasm_simd128.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_switch_wasmsimd(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const v128_t v3_0 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_1 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_2 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_3 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_4 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_5 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_6 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v3_7 = wasm_v128_load(i0);
i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
      uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store(oN, v0_7);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store(oN, v0_6);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store(oN, v0_5);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store(oN, v0_4);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store(oN, v0_3);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store(oN, v0_2);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store(oN, v0_1);
case 0:
wasm_v128_store(o, v0_0);
o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const v128_t v3_0 = wasm_v128_load(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const v128_t v3_1 = wasm_v128_load(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const v128_t v3_2 = wasm_v128_load(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const v128_t v3_3 = wasm_v128_load(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const v128_t v3_4 = wasm_v128_load(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const v128_t v3_5 = wasm_v128_load(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const v128_t v3_6 = wasm_v128_load(i6);
const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);
const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store64_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store64_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store64_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store64_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store64_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store64_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store64_lane(oN, v0_1, 0);
case 0:
wasm_v128_store64_lane(o, v0_0, 0);
o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store32_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store32_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store32_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store32_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store32_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store32_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store32_lane(oN, v0_1, 0);
case 0:
wasm_v128_store32_lane(o, v0_0, 0);
o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_0 = wasm_u64x2_shr(v0_0, 32);
v0_1 = wasm_u64x2_shr(v0_1, 32);
v0_2 = wasm_u64x2_shr(v0_2, 32);
v0_3 = wasm_u64x2_shr(v0_3, 32);
v0_4 = wasm_u64x2_shr(v0_4, 32);
v0_5 = wasm_u64x2_shr(v0_5, 32);
v0_6 = wasm_u64x2_shr(v0_6, 32);
v0_7 = wasm_u64x2_shr(v0_7, 32);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
wasm_v128_store16_lane(oN, v0_7, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
wasm_v128_store16_lane(oN, v0_6, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
wasm_v128_store16_lane(oN, v0_5, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
wasm_v128_store16_lane(oN, v0_4, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
wasm_v128_store16_lane(oN, v0_3, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
wasm_v128_store16_lane(oN, v0_2, 0);
oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
wasm_v128_store16_lane(oN, v0_1, 0);
case 0:
wasm_v128_store16_lane(o, v0_0, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 13,066
| 43.294915
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-transposec/gen/x16-transposec-8x8-reuse-switch-zip-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <arm_neon.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x16_transposec_ukernel__8x8_reuse_switch_zip_neon(
const uint16_t* input,
uint16_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint16_t));
assert(input_stride >= block_width * sizeof(uint16_t));
const size_t tile_height = 8;
const size_t tile_width = 8;
const size_t tile_hbytes = tile_height * sizeof(uint16_t);
const size_t tile_wbytes = tile_width * sizeof(uint16_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);
const uint16_t* i0 = input;
uint16_t* o = (uint16_t*) output;
const size_t minus_output_stride = -output_stride;
do {
const size_t rem = min(block_width - 1, 7);
const size_t oN_stride = rem * output_stride;
size_t bh = block_height;
for (; bh >= 8; bh -= 8) {
const uint16x8_t v3_0 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_1 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_2 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_3 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_4 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_5 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_6 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8_t v3_7 = vld1q_u16(i0); i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
      uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1q_u16(oN, v0_3.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1q_u16(oN, v0_3.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1q_u16(oN, v0_2.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1q_u16(oN, v0_2.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1q_u16(oN, v0_1.val[1]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1q_u16(oN, v0_1.val[0]); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1q_u16(oN, v0_0.val[1]);
case 0:
vst1q_u16(o, v0_0.val[0]); o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
break;
default:
XNN_UNREACHABLE;
}
}
if (bh != 0) {
const uint16x8_t v3_0 = vld1q_u16(i0);
const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const uint16x8_t v3_1 = vld1q_u16(i1);
const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i1;
}
const uint16x8_t v3_2 = vld1q_u16(i2);
const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(bh < 4) {
i3 = i2;
}
const uint16x8_t v3_3 = vld1q_u16(i3);
const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(bh <= 4) {
i4 = i3;
}
const uint16x8_t v3_4 = vld1q_u16(i4);
const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(bh < 6) {
i5 = i4;
}
const uint16x8_t v3_5 = vld1q_u16(i5);
const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(bh <= 6) {
i6 = i5;
}
const uint16x8_t v3_6 = vld1q_u16(i6);
const uint16x8_t v3_7 = vmovq_n_u16(0);
const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);
const uint16x8x2_t v2_1 = vzipq_u16(v3_1, v3_5);
const uint16x8x2_t v2_2 = vzipq_u16(v3_2, v3_6);
const uint16x8x2_t v2_3 = vzipq_u16(v3_3, v3_7);
const uint16x8x2_t v1_0 = vzipq_u16(v2_0.val[0], v2_2.val[0]);
const uint16x8x2_t v1_1 = vzipq_u16(v2_0.val[1], v2_2.val[1]);
const uint16x8x2_t v1_2 = vzipq_u16(v2_1.val[0], v2_3.val[0]);
const uint16x8x2_t v1_3 = vzipq_u16(v2_1.val[1], v2_3.val[1]);
const uint16x8x2_t v0_0 = vzipq_u16(v1_0.val[0], v1_2.val[0]);
const uint16x8x2_t v0_1 = vzipq_u16(v1_0.val[1], v1_2.val[1]);
const uint16x8x2_t v0_2 = vzipq_u16(v1_1.val[0], v1_3.val[0]);
const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]);
uint16x4_t v0_low = vget_low_u16(v0_0.val[0]);
uint16x4_t v1_low = vget_low_u16(v0_0.val[1]);
uint16x4_t v2_low = vget_low_u16(v0_1.val[0]);
uint16x4_t v3_low = vget_low_u16(v0_1.val[1]);
uint16x4_t v4_low = vget_low_u16(v0_2.val[0]);
uint16x4_t v5_low = vget_low_u16(v0_2.val[1]);
uint16x4_t v6_low = vget_low_u16(v0_3.val[0]);
uint16x4_t v7_low = vget_low_u16(v0_3.val[1]);
if (bh & 4) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_u16(oN, v7_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_u16(oN, v6_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_u16(oN, v5_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_u16(oN, v4_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_u16(oN, v3_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_u16(oN, v2_low); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_u16(oN, v1_low);
case 0:
vst1_u16(o, v0_low); o += 4;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vget_high_u16(v0_0.val[0]);
v1_low = vget_high_u16(v0_0.val[1]);
v2_low = vget_high_u16(v0_1.val[0]);
v3_low = vget_high_u16(v0_1.val[1]);
v4_low = vget_high_u16(v0_2.val[0]);
v5_low = vget_high_u16(v0_2.val[1]);
v6_low = vget_high_u16(v0_3.val[0]);
v7_low = vget_high_u16(v0_3.val[1]);
}
if (bh & 2) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v7_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v6_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v5_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v4_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v3_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v2_low), 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u32((void*) oN, vreinterpret_u32_u16(v1_low), 0);
case 0:
vst1_lane_u32((void*) o, vreinterpret_u32_u16(v0_low), 0); o += 2;
break;
default:
XNN_UNREACHABLE;
}
v0_low = vext_u16(v0_low, v0_low, 2);
v1_low = vext_u16(v1_low, v1_low, 2);
v2_low = vext_u16(v2_low, v2_low, 2);
v3_low = vext_u16(v3_low, v3_low, 2);
v4_low = vext_u16(v4_low, v4_low, 2);
v5_low = vext_u16(v5_low, v5_low, 2);
v6_low = vext_u16(v6_low, v6_low, 2);
v7_low = vext_u16(v7_low, v7_low, 2);
}
if (bh & 1) {
uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
switch (rem) {
case 7:
vst1_lane_u16(oN, v7_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 6:
vst1_lane_u16(oN, v6_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 5:
vst1_lane_u16(oN, v5_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 4:
vst1_lane_u16(oN, v4_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 3:
vst1_lane_u16(oN, v3_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 2:
vst1_lane_u16(oN, v2_low, 0); oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
case 1:
vst1_lane_u16(oN, v1_low, 0);
case 0:
vst1_lane_u16(o, v0_low, 0);
break;
default:
XNN_UNREACHABLE;
}
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
o = (uint16_t*) ((uintptr_t) o + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 10,612
| 42.142276
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/x24-transposec/x24-transposec-2x2-neon-tbl64.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__2x2_neon_tbl64(
const void* input,
void* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t tile_height = 2;
const size_t tile_width = 2;
const size_t tile_wbytes = tile_width * 3;
const size_t tile_wbytes_minus_4 = tile_wbytes - 4;
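  // Each 24-bit element is 3 bytes, so a 2-element tile row spans 6 bytes:
  // it is written as a 4-byte lane (advance 4) plus a 2-byte lane (advance
  // tile_wbytes - 4 = 2), for tile_wbytes in total.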
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_height * output_stride - block_height * 3;
const size_t tile_stride = tile_height * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
const uint8x8_t vperm0 = vld1_u8(params->neon_tbl64.pos0);
const uint8x8_t vperm1 = vld1_u8(params->neon_tbl64.pos1);
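  // The permutation tables gather transposed rows directly: vtbl2_u8 treats
  // {row0, row1} as one 16-byte table and picks the two 3-byte elements of
  // each output row by byte index.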
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
uint8x8x2_t v;
v.val[0] = vld1_u8(i0); i0 = (const uint8_t*) ((uintptr_t) i0 + tile_stride);
v.val[1] = vld1_u8(i1); i1 = (const uint8_t*) ((uintptr_t) i1 + tile_stride);
const uint8x8_t vres0 = vtbl2_u8(v, vperm0);
const uint8x8_t vres1 = vtbl2_u8(v, vperm1);
vst1_lane_u32((void*) o1, vreinterpret_u32_u8(vres1), 0); o1 = (uint8_t*) ((uintptr_t) o1 + 4);
vst1_lane_u32((void*) o0, vreinterpret_u32_u8(vres0), 0); o0 = (uint8_t*) ((uintptr_t) o0 + 4);
vst1_lane_u16((void*) o1, vreinterpret_u16_u8(vres1), 2); o1 = (uint8_t*) ((uintptr_t) o1 + tile_wbytes_minus_4);
vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0), 2); o0 = (uint8_t*) ((uintptr_t) o0 + tile_wbytes_minus_4);
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
uint8x8_t v = vld1_u8(i0);
const uint8x8_t vres0 = vtbl1_u8(v, vperm0);
const uint8x8_t vres1 = vtbl1_u8(v, vperm1);
if (bh & 1) {
vst1_lane_u16((void*) o1, vreinterpret_u16_u8(vres1), 0); o1 += 2;
vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0), 0); o0 += 2;
vst1_lane_u8(o1, vres1, 2); o1 += 1;
vst1_lane_u8(o0, vres0, 2); o0 += 1;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
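
// --- Editor's note (not part of the generated kernel) ---
// The permutation tables come from params; a plausible reconstruction from
// the store pattern above (bytes 0..5 of each result are stored, lanes 6..7
// never are) would be:
//   pos0 = { 0, 1, 2, 8, 9, 10, /*unused*/ 0, 0 }    // transposed row 0
//   pos1 = { 3, 4, 5, 11, 12, 13, /*unused*/ 0, 0 }  // transposed row 1
// i.e. each table picks one 3-byte element from each of the two input rows.
// The values XNNPACK actually writes into the unused lanes may differ.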
| 3,050
| 34.894118
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/x24-transposec/x24-transposec-4x4-aarch64-neon-tbl128.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/microparams.h>
void xnn_x24_transposec_ukernel__4x4_aarch64_neon_tbl128(
const void* input,
void* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * 3;
const size_t tile_wbytes_minus_8 = tile_wbytes - 8;
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * 3;
const size_t tile_stride = tile_height * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
const uint8x16_t vperm0 = vld1q_u8(params->neon_tbl128.pos0);
const uint8x16_t vperm1 = vld1q_u8(params->neon_tbl128.pos1);
const uint8x16_t vperm2 = vld1q_u8(params->neon_tbl128.pos2);
const uint8x16_t vperm3 = vld1q_u8(params->neon_tbl128.pos3);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
uint8x16x4_t v;
v.val[0] = vld1q_u8(i0); i0 = (const uint8_t*) ((uintptr_t) i0 + tile_stride);
v.val[1] = vld1q_u8(i1); i1 = (const uint8_t*) ((uintptr_t) i1 + tile_stride);
v.val[2] = vld1q_u8(i2); i2 = (const uint8_t*) ((uintptr_t) i2 + tile_stride);
v.val[3] = vld1q_u8(i3); i3 = (const uint8_t*) ((uintptr_t) i3 + tile_stride);
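      // vqtbl4q_u8 indexes the four row registers as one 64-byte table, so a
      // single lookup assembles the 12 bytes (four 3-byte elements) of each
      // transposed row; only bytes 0..11 of each result are stored.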
const uint8x16_t vres0 = vqtbl4q_u8(v, vperm0);
const uint8x16_t vres1 = vqtbl4q_u8(v, vperm1);
const uint8x16_t vres2 = vqtbl4q_u8(v, vperm2);
const uint8x16_t vres3 = vqtbl4q_u8(v, vperm3);
vst1_u8(o3, vget_low_u8(vres3)); o3 += 8;
vst1_u8(o2, vget_low_u8(vres2)); o2 += 8;
vst1_u8(o1, vget_low_u8(vres1)); o1 += 8;
vst1_u8(o0, vget_low_u8(vres0)); o0 += 8;
vst1q_lane_u32((void*) o3, vreinterpretq_u32_u8(vres3), 2); o3 = (uint8_t*) ((uintptr_t) o3 + tile_wbytes_minus_8);
vst1q_lane_u32((void*) o2, vreinterpretq_u32_u8(vres2), 2); o2 = (uint8_t*) ((uintptr_t) o2 + tile_wbytes_minus_8);
vst1q_lane_u32((void*) o1, vreinterpretq_u32_u8(vres1), 2); o1 = (uint8_t*) ((uintptr_t) o1 + tile_wbytes_minus_8);
vst1q_lane_u32((void*) o0, vreinterpretq_u32_u8(vres0), 2); o0 = (uint8_t*) ((uintptr_t) o0 + tile_wbytes_minus_8);
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
uint8x16x3_t v;
v.val[0] = vld1q_u8(i0);
v.val[1] = vld1q_u8(i1);
v.val[2] = vld1q_u8(i2);
uint8x16_t vres0 = vqtbl3q_u8(v, vperm0);
uint8x16_t vres1 = vqtbl3q_u8(v, vperm1);
uint8x16_t vres2 = vqtbl3q_u8(v, vperm2);
uint8x16_t vres3 = vqtbl3q_u8(v, vperm3);
uint8x8_t vres0_lo = vget_low_u8(vres0);
uint8x8_t vres1_lo = vget_low_u8(vres1);
uint8x8_t vres2_lo = vget_low_u8(vres2);
uint8x8_t vres3_lo = vget_low_u8(vres3);
if (bh & 2) {
vst1_lane_u32((void*) o3, vreinterpret_u32_u8(vres3_lo), 0); o3 += 4;
vst1_lane_u32((void*) o2, vreinterpret_u32_u8(vres2_lo), 0); o2 += 4;
vst1_lane_u32((void*) o1, vreinterpret_u32_u8(vres1_lo), 0); o1 += 4;
vst1_lane_u32((void*) o0, vreinterpret_u32_u8(vres0_lo), 0); o0 += 4;
vst1_lane_u16((void*) o3, vreinterpret_u16_u8(vres3_lo), 2); o3 += 2;
vst1_lane_u16((void*) o2, vreinterpret_u16_u8(vres2_lo), 2); o2 += 2;
vst1_lane_u16((void*) o1, vreinterpret_u16_u8(vres1_lo), 2); o1 += 2;
vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0_lo), 2); o0 += 2;
vres0_lo = vget_low_u8(vextq_u8(vres0, vres0, 6));
vres1_lo = vget_low_u8(vextq_u8(vres1, vres1, 6));
vres2_lo = vget_low_u8(vextq_u8(vres2, vres2, 6));
vres3_lo = vget_low_u8(vextq_u8(vres3, vres3, 6));
}
if (bh & 1) {
vst1_lane_u16((void*) o3, vreinterpret_u16_u8(vres3_lo), 0); o3 += 2;
vst1_lane_u16((void*) o2, vreinterpret_u16_u8(vres2_lo), 0); o2 += 2;
vst1_lane_u16((void*) o1, vreinterpret_u16_u8(vres1_lo), 0); o1 += 2;
vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0_lo), 0); o0 += 2;
vst1_lane_u8(o3, vres3_lo, 2); o3 += 1;
vst1_lane_u8(o2, vres2_lo, 2); o2 += 1;
vst1_lane_u8(o1, vres1_lo, 2); o1 += 1;
vst1_lane_u8(o0, vres0_lo, 2); o0 += 1;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/x24-transposec-4x4-ssse3.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>
void xnn_x24_transposec_ukernel__4x4_ssse3(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * 3;
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_width * output_stride - block_height * 3;
const size_t tile_stride = tile_height * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
const __m128i vperm0 = _mm_loadu_si128((const __m128i*) params->ssse3.pos0);
const __m128i vperm1 = _mm_loadu_si128((const __m128i*) params->ssse3.pos1);
const __m128i vperm2 = _mm_loadu_si128((const __m128i*) params->ssse3.pos2);
const __m128i vperm3 = _mm_loadu_si128((const __m128i*) params->ssse3.pos3);
const __m128i vperm4 = _mm_loadu_si128((const __m128i*) params->ssse3.pos4);
const __m128i vperm5 = _mm_loadu_si128((const __m128i*) params->ssse3.pos5);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
const __m128i v0 = _mm_loadu_si128((const __m128i*) i0);
const __m128i v1 = _mm_loadu_si128((const __m128i*) i1);
const __m128i v2 = _mm_loadu_si128((const __m128i*) i2);
const __m128i v3 = _mm_loadu_si128((const __m128i*) i3);
i0 = (const uint8_t*) ((uintptr_t) i0 + tile_stride);
i1 = (const uint8_t*) ((uintptr_t) i1 + tile_stride);
i2 = (const uint8_t*) ((uintptr_t) i2 + tile_stride);
i3 = (const uint8_t*) ((uintptr_t) i3 + tile_stride);
const __m128i v1_0 = _mm_unpacklo_epi8(v0, v1);
const __m128i v1_1 = _mm_unpackhi_epi8(v0, v1);
const __m128i v1_2 = _mm_unpacklo_epi8(v2, v3);
const __m128i v1_3 = _mm_unpackhi_epi8(v2, v3);
const __m128i v3_0 = _mm_unpacklo_epi8(v1_0, v1_2);
const __m128i v3_1 = _mm_unpackhi_epi8(v1_0, v1_2);
const __m128i v3_2 = _mm_unpacklo_epi8(v1_1, v1_3);
__m128i v4_0 = _mm_shuffle_epi8(v3_0, vperm0);
__m128i v4_1 = _mm_or_si128(_mm_shuffle_epi8(v3_0, vperm2), _mm_shuffle_epi8(v3_1, vperm3));
__m128i v4_2 = _mm_or_si128(_mm_shuffle_epi8(v3_1, vperm4), _mm_shuffle_epi8(v3_2, vperm5));
__m128i v4_3 = _mm_shuffle_epi8(v3_2, vperm1);
_mm_storel_epi64((__m128i*) o3, v4_3);
_mm_storel_epi64((__m128i*) o2, v4_2);
_mm_storel_epi64((__m128i*) o1, v4_1);
_mm_storel_epi64((__m128i*) o0, v4_0);
o3 += 8;
o2 += 8;
o1 += 8;
o0 += 8;
v4_3 = _mm_unpackhi_epi64(v4_3, v4_3);
unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v4_3));
v4_2 = _mm_unpackhi_epi64(v4_2, v4_2);
unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v4_2));
v4_1 = _mm_unpackhi_epi64(v4_1, v4_1);
unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v4_1));
v4_0 = _mm_unpackhi_epi64(v4_0, v4_0);
unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v4_0));
o3 += 4;
o2 += 4;
o1 += 4;
o0 += 4;
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
const __m128i v0 = _mm_loadu_si128((const __m128i*) i0);
const __m128i v1 = _mm_loadu_si128((const __m128i*) i1);
const __m128i v2 = _mm_loadu_si128((const __m128i*) i2);
const __m128i v1_0 = _mm_unpacklo_epi8(v0, v1);
const __m128i v1_1 = _mm_unpackhi_epi8(v0, v1);
const __m128i v1_2 = _mm_unpacklo_epi8(v2, v2);
const __m128i v1_3 = _mm_unpackhi_epi8(v2, v2);
const __m128i v3_0 = _mm_unpacklo_epi8(v1_0, v1_2);
const __m128i v3_1 = _mm_unpackhi_epi8(v1_0, v1_2);
const __m128i v3_2 = _mm_unpacklo_epi8(v1_1, v1_3);
__m128i v4_0 = _mm_shuffle_epi8(v3_0, vperm0);
__m128i v4_1 = _mm_or_si128(_mm_shuffle_epi8(v3_0, vperm2), _mm_shuffle_epi8(v3_1, vperm3));
__m128i v4_2 = _mm_or_si128(_mm_shuffle_epi8(v3_1, vperm4), _mm_shuffle_epi8(v3_2, vperm5));
__m128i v4_3 = _mm_shuffle_epi8(v3_2, vperm1);
if (bh & 2) {
unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v4_3));
unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v4_2));
unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v4_1));
unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v4_0));
o3 += 4;
o2 += 4;
o1 += 4;
o0 += 4;
unaligned_store_u16(o3, (uint16_t) _mm_extract_epi16(v4_3, 2));
unaligned_store_u16(o2, (uint16_t) _mm_extract_epi16(v4_2, 2));
unaligned_store_u16(o1, (uint16_t) _mm_extract_epi16(v4_1, 2));
unaligned_store_u16(o0, (uint16_t) _mm_extract_epi16(v4_0, 2));
o3 += 2;
o2 += 2;
o1 += 2;
o0 += 2;
v4_3 = _mm_bsrli_si128(v4_3, 6);
v4_2 = _mm_bsrli_si128(v4_2, 6);
v4_1 = _mm_bsrli_si128(v4_1, 6);
v4_0 = _mm_bsrli_si128(v4_0, 6);
}
if (bh & 1) {
unaligned_store_u16(o3, (uint16_t) _mm_cvtsi128_si32(v4_3));
unaligned_store_u16(o2, (uint16_t) _mm_cvtsi128_si32(v4_2));
unaligned_store_u16(o1, (uint16_t) _mm_cvtsi128_si32(v4_1));
unaligned_store_u16(o0, (uint16_t) _mm_cvtsi128_si32(v4_0));
o3 += 2;
o2 += 2;
o1 += 2;
o0 += 2;
*((uint8_t*) o3) = (uint8_t) _mm_cvtsi128_si32(_mm_bsrli_si128(v4_3, 2));
*((uint8_t*) o2) = (uint8_t) _mm_cvtsi128_si32(_mm_bsrli_si128(v4_2, 2));
*((uint8_t*) o1) = (uint8_t) _mm_cvtsi128_si32(_mm_bsrli_si128(v4_1, 2));
*((uint8_t*) o0) = (uint8_t) _mm_cvtsi128_si32(_mm_bsrli_si128(v4_0, 2));
o3 += 1;
o2 += 1;
o1 += 1;
o0 += 1;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-1x2-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__1x2_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 6 - block_height * input_stride;
const size_t output_reset = 2 * output_stride - block_height * 3;
const size_t input_offset = 1 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1 += 3;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0 += 3;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, 2);
} while (block_width != 0);
}
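
As a quick illustration of the calling convention shared by these transpose micro-kernels, the following minimal sketch (not part of the XNNPACK sources; the sample buffers and the NULL params argument are assumptions, the latter based on the scalar kernel never dereferencing params) transposes a 2x2 block of 24-bit elements with the 1x2 scalar kernel above. Strides are in bytes, and each element occupies 3 bytes.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // 2 rows x 2 columns of 3-byte elements A,B / C,D; row stride = 6 bytes.
  const uint8_t in[12] = {
    0xA0, 0xA1, 0xA2,  0xB0, 0xB1, 0xB2,   // row 0: elements A, B
    0xC0, 0xC1, 0xC2,  0xD0, 0xD1, 0xD2,   // row 1: elements C, D
  };
  uint8_t out[12];
  xnn_x24_transposec_ukernel__1x2_scalar(
      in, out,
      /*input_stride=*/6, /*output_stride=*/6,
      /*block_width=*/2, /*block_height=*/2,
      /*params=*/NULL);  // assumption: the scalar kernel ignores params
  // Expected transposed layout: row 0 = A, C and row 1 = B, D.
  for (size_t i = 0; i < 12; i++) {
    printf("%02X%s", out[i], (i % 6 == 5) ? "\n" : " ");
  }
  return 0;
}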
XNNPACK-master/src/x24-transposec/gen/x24-transposec-1x4-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__1x4_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 12 - block_height * input_stride;
const size_t output_reset = 4 * output_stride - block_height * 3;
const size_t input_offset = 1 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
o3[0] = i0[9];
o3[1] = i0[10];
o3[2] = i0[11];
o3 += 3;
o2[0] = i0[6];
o2[1] = i0[7];
o2[2] = i0[8];
o2 += 3;
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1 += 3;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0 += 3;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, 4);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-2x1-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__2x1_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 3 - round_down_po2(block_height, 2) * input_stride;
const size_t output_reset = 1 * output_stride - block_height * 3;
const size_t input_offset = 2 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
uint8_t* o0 = (uint8_t*) output;
do {
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i = i0;
if (bh & 1) {
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, 1);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-2x2-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__2x2_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 6 - round_down_po2(block_height, 2) * input_stride;
const size_t output_reset = 2 * output_stride - block_height * 3;
const size_t input_offset = 2 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1 += 6;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i = i0;
if (bh & 1) {
o1[0] = i[3];
o1[1] = i[4];
o1[2] = i[5];
o1 += 3;
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, 2);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-2x4-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__2x4_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 12 - round_down_po2(block_height, 2) * input_stride;
const size_t output_reset = 4 * output_stride - block_height * 3;
const size_t input_offset = 2 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 2; bh -= 2) {
o3[0] = i0[9];
o3[1] = i0[10];
o3[2] = i0[11];
o3[3] = i1[9];
o3[4] = i1[10];
o3[5] = i1[11];
o3 += 6;
o2[0] = i0[6];
o2[1] = i0[7];
o2[2] = i0[8];
o2[3] = i1[6];
o2[4] = i1[7];
o2[5] = i1[8];
o2 += 6;
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1 += 6;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i = i0;
if (bh & 1) {
o3[0] = i[9];
o3[1] = i[10];
o3[2] = i[11];
o3 += 3;
o2[0] = i[6];
o2[1] = i[7];
o2[2] = i[8];
o2 += 3;
o1[0] = i[3];
o1[1] = i[4];
o1[2] = i[5];
o1 += 3;
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, 4);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-4x1-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__4x1_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 3 - round_down_po2(block_height, 4) * input_stride;
const size_t output_reset = 1 * output_stride - block_height * 3;
const size_t input_offset = 4 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
do {
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0[6] = i2[0];
o0[7] = i2[1];
o0[8] = i2[2];
o0[9] = i3[0];
o0[10] = i3[1];
o0[11] = i3[2];
o0 += 12;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i = i0;
if (bh & 2) {
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i = i2;
}
if (bh & 1) {
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
block_width = doz(block_width, 1);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-4x2-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__4x2_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 6 - round_down_po2(block_height, 4) * input_stride;
const size_t output_reset = 2 * output_stride - block_height * 3;
const size_t input_offset = 4 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1[6] = i2[3];
o1[7] = i2[4];
o1[8] = i2[5];
o1[9] = i3[3];
o1[10] = i3[4];
o1[11] = i3[5];
o1 += 12;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0[6] = i2[0];
o0[7] = i2[1];
o0[8] = i2[2];
o0[9] = i3[0];
o0[10] = i3[1];
o0[11] = i3[2];
o0 += 12;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i = i0;
if (bh & 2) {
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1 += 6;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i = i2;
}
if (bh & 1) {
o1[0] = i[3];
o1[1] = i[4];
o1[2] = i[5];
o1 += 3;
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, 2);
} while (block_width != 0);
}
XNNPACK-master/src/x24-transposec/gen/x24-transposec-4x4-scalar.c
// Auto-generated file. Do not edit!
// Template: src/x24-transposec/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x24_transposec_ukernel__4x4_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 12 - round_down_po2(block_height, 4) * input_stride;
const size_t output_reset = 4 * output_stride - block_height * 3;
const size_t input_offset = 4 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
o3[0] = i0[9];
o3[1] = i0[10];
o3[2] = i0[11];
o3[3] = i1[9];
o3[4] = i1[10];
o3[5] = i1[11];
o3[6] = i2[9];
o3[7] = i2[10];
o3[8] = i2[11];
o3[9] = i3[9];
o3[10] = i3[10];
o3[11] = i3[11];
o3 += 12;
o2[0] = i0[6];
o2[1] = i0[7];
o2[2] = i0[8];
o2[3] = i1[6];
o2[4] = i1[7];
o2[5] = i1[8];
o2[6] = i2[6];
o2[7] = i2[7];
o2[8] = i2[8];
o2[9] = i3[6];
o2[10] = i3[7];
o2[11] = i3[8];
o2 += 12;
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1[6] = i2[3];
o1[7] = i2[4];
o1[8] = i2[5];
o1[9] = i3[3];
o1[10] = i3[4];
o1[11] = i3[5];
o1 += 12;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0[6] = i2[0];
o0[7] = i2[1];
o0[8] = i2[2];
o0[9] = i3[0];
o0[10] = i3[1];
o0[11] = i3[2];
o0 += 12;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i = i0;
if (bh & 2) {
o3[0] = i0[9];
o3[1] = i0[10];
o3[2] = i0[11];
o3[3] = i1[9];
o3[4] = i1[10];
o3[5] = i1[11];
o3 += 6;
o2[0] = i0[6];
o2[1] = i0[7];
o2[2] = i0[8];
o2[3] = i1[6];
o2[4] = i1[7];
o2[5] = i1[8];
o2 += 6;
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1[3] = i1[3];
o1[4] = i1[4];
o1[5] = i1[5];
o1 += 6;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0[3] = i1[0];
o0[4] = i1[1];
o0[5] = i1[2];
o0 += 6;
i = i2;
}
if (bh & 1) {
o3[0] = i[9];
o3[1] = i[10];
o3[2] = i[11];
o3 += 3;
o2[0] = i[6];
o2[1] = i[7];
o2[2] = i[8];
o2 += 3;
o1[0] = i[3];
o1[1] = i[4];
o1[2] = i[5];
o1 += 3;
o0[0] = i[0];
o0[1] = i[1];
o0[2] = i[2];
o0 += 3;
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, 4);
} while (block_width != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-2c1s1r-gemm-scalar-float.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_float(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
float* w = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// channel tile loop multiple of 2
size_t c = channels;
for (; c >= 2; c -= 2) {
const float b0 = b[0];
const float b1 = b[1];
unaligned_indexed_store_f32(w, 0, b0);
unaligned_indexed_store_f32(w, 1, b1);
b += 2;
w = (float*) ((uintptr_t) w + channel_tile_stride);
}
// channel subtile loop multiple of 1
if (c != 0) {
const float b0 = b[0];
unaligned_indexed_store_f32(w, 0, b0);
b += 1;
w = (float*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
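
To make the bias-packing layout concrete, here is a minimal sketch (an illustration, not XNNPACK usage guidance: the contiguous strides and the NULL params argument are assumptions, chosen so the biases land back to back) that packs three bias values with the kernel above. With channels = 3, one full tile of 2 is written, then one subtile of 1.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const float bias[3] = { 1.0f, 2.0f, 3.0f };
  float packed[3] = { 0.0f, 0.0f, 0.0f };
  xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_float(
      /*groups=*/1, /*channels=*/3,
      (const uint32_t*) bias, (uint32_t*) packed,
      /*channel_tile_stride=*/2 * sizeof(float),     // step over the 2-wide tile
      /*channel_subtile_stride=*/1 * sizeof(float),  // step over the 1-wide subtile
      /*params=*/NULL);  // assumption: params is unused by this kernel
  printf("%g %g %g\n", packed[0], packed[1], packed[2]);  // prints: 1 2 3
  return 0;
}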
XNNPACK-master/src/x32-packb/gen/x32-packb-2c1s1r-gemm-scalar-int.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_int(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
uint32_t* w = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// channel tile loop multiple of 2
size_t c = channels;
for (; c >= 2; c -= 2) {
const uint32_t b0 = b[0];
const uint32_t b1 = b[1];
unaligned_indexed_store_u32(w, 0, b0);
unaligned_indexed_store_u32(w, 1, b1);
b += 2;
w = (uint32_t*) ((uintptr_t) w + channel_tile_stride);
}
// channel subtile loop multiple of 1
if (c != 0) {
const uint32_t b0 = b[0];
unaligned_indexed_store_u32(w, 0, b0);
b += 1;
w = (uint32_t*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-2c2s1r-gemm-scalar-float.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_2c2s1r__scalar_float(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
float* w = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// channel tile loop multiple of 2
size_t c = channels;
for (; c >= 2; c -= 2) {
const float b0 = b[0];
const float b1 = b[1];
unaligned_indexed_store_f32(w, 0, b0);
unaligned_indexed_store_f32(w, 1, b1);
b += 2;
w = (float*) ((uintptr_t) w + channel_tile_stride);
}
if XNN_UNLIKELY(c != 0) {
const float b0 = b[0];
unaligned_indexed_store_f32(w, 0, b0);
b += 1;
w = (float*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-2c2s1r-gemm-scalar-int.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_2c2s1r__scalar_int(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
uint32_t* w = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// channel tile loop multiple of 2
size_t c = channels;
for (; c >= 2; c -= 2) {
const uint32_t b0 = b[0];
const uint32_t b1 = b[1];
unaligned_indexed_store_u32(w, 0, b0);
unaligned_indexed_store_u32(w, 1, b1);
b += 2;
w = (uint32_t*) ((uintptr_t) w + channel_tile_stride);
}
if XNN_UNLIKELY(c != 0) {
const uint32_t b0 = b[0];
unaligned_indexed_store_u32(w, 0, b0);
b += 1;
w = (uint32_t*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-4c1s1r-gemm-scalar-float.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_4c1s1r__scalar_float(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
float* w = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// channel tile loop multiple of 4
size_t c = channels;
for (; c >= 4; c -= 4) {
const float b0 = b[0];
const float b1 = b[1];
const float b2 = b[2];
const float b3 = b[3];
unaligned_indexed_store_f32(w, 0, b0);
unaligned_indexed_store_f32(w, 1, b1);
unaligned_indexed_store_f32(w, 2, b2);
unaligned_indexed_store_f32(w, 3, b3);
b += 4;
w = (float*) ((uintptr_t) w + channel_tile_stride);
}
// channel subtile loop multiple of 1
for (; c >= 1; c -= 1) {
const float b0 = b[0];
unaligned_indexed_store_f32(w, 0, b0);
b += 1;
w = (float*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-4c1s1r-gemm-scalar-int.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_4c1s1r__scalar_int(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
uint32_t* w = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// channel tile loop multiple of 4
size_t c = channels;
for (; c >= 4; c -= 4) {
const uint32_t b0 = b[0];
const uint32_t b1 = b[1];
const uint32_t b2 = b[2];
const uint32_t b3 = b[3];
unaligned_indexed_store_u32(w, 0, b0);
unaligned_indexed_store_u32(w, 1, b1);
unaligned_indexed_store_u32(w, 2, b2);
unaligned_indexed_store_u32(w, 3, b3);
b += 4;
w = (uint32_t*) ((uintptr_t) w + channel_tile_stride);
}
// channel subtile loop multiple of 1
for (; c >= 1; c -= 1) {
const uint32_t b0 = b[0];
unaligned_indexed_store_u32(w, 0, b0);
b += 1;
w = (uint32_t*) ((uintptr_t) w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-4c4s1r-gemm-scalar-float.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_4c4s1r__scalar_float(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
float* w = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// channel tile loop multiple of 4
size_t c = channels;
for (; c >= 4; c -= 4) {
const float b0 = b[0];
const float b1 = b[1];
const float b2 = b[2];
const float b3 = b[3];
unaligned_indexed_store_f32(w, 0, b0);
unaligned_indexed_store_f32(w, 1, b1);
unaligned_indexed_store_f32(w, 2, b2);
unaligned_indexed_store_f32(w, 3, b3);
b += 4;
w = (float*) ((uintptr_t) w + channel_tile_stride);
}
if XNN_UNLIKELY(c != 0) {
// channels remainder (1..3)
float* prev_w = w;
if (c & 2) {
float b0 = b[0];
float b1 = b[1];
unaligned_indexed_store_f32(w, 0, b0);
unaligned_indexed_store_f32(w, 1, b1);
b += 2;
w += 2;
}
if (c & 1) {
float b0 = b[0];
unaligned_indexed_store_f32(w, 0, b0);
b += 1;
w += 1;
}
w = (float*) ((uintptr_t) prev_w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packb/gen/x32-packb-4c4s1r-gemm-scalar-int.c
// Auto-generated file. Do not edit!
// Template: src/x32-packb/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/packb.h>
#include <xnnpack/unaligned.h>
void xnn_x32_packb_gemm_ukernel_4c4s1r__scalar_int(
size_t groups,
size_t channels,
const uint32_t* bias,
uint32_t* packed_weights,
size_t channel_tile_stride,
size_t channel_subtile_stride,
const union xnn_x32_packb_params* params)
{
assert(groups != 0);
assert(channels != 0);
assert(packed_weights != NULL);
uint32_t* w = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// channel tile loop multiple of 4
size_t c = channels;
for (; c >= 4; c -= 4) {
const uint32_t b0 = b[0];
const uint32_t b1 = b[1];
const uint32_t b2 = b[2];
const uint32_t b3 = b[3];
unaligned_indexed_store_u32(w, 0, b0);
unaligned_indexed_store_u32(w, 1, b1);
unaligned_indexed_store_u32(w, 2, b2);
unaligned_indexed_store_u32(w, 3, b3);
b += 4;
w = (uint32_t*) ((uintptr_t) w + channel_tile_stride);
}
if XNN_UNLIKELY(c != 0) {
// channels remainder (1..3)
uint32_t* prev_w = w;
if (c & 2) {
uint32_t b0 = b[0];
uint32_t b1 = b[1];
unaligned_indexed_store_u32(w, 0, b0);
unaligned_indexed_store_u32(w, 1, b1);
b += 2;
w += 2;
}
if (c & 1) {
uint32_t b0 = b[0];
unaligned_indexed_store_u32(w, 0, b0);
b += 1;
w += 1;
}
w = (uint32_t*) ((uintptr_t) prev_w + channel_subtile_stride);
}
} while (--groups != 0);
}
XNNPACK-master/src/x32-packw/gen/x32-packw-x12-gemm-goi-neon-ld4lane-x4-prfm.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 12);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp0123x4567;
uint32x4x4_t vtmp0123x89AB;
do {
// NC main loop multiple of 12
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 12; n -= 12) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
uint32x4_t vb8 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
vst1q_u32(packed_weights, vb8); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
const uint32_t* w8 = w7 + kc;
const uint32_t* w9 = w8 + kc;
const uint32_t* w10 = w9 + kc;
const uint32_t* w11 = w10 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
xnn_prefetch_to_l1((const int8_t*) w8);
xnn_prefetch_to_l1((const int8_t*) w8 + 64);
xnn_prefetch_to_l1((const int8_t*) w9);
xnn_prefetch_to_l1((const int8_t*) w9 + 64);
xnn_prefetch_to_l1((const int8_t*) w10);
xnn_prefetch_to_l1((const int8_t*) w10 + 64);
xnn_prefetch_to_l1((const int8_t*) w11);
xnn_prefetch_to_l1((const int8_t*) w11 + 64);
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
vtmp0123x89AB = vld4q_lane_u32(w11, vtmp0123x89AB, 3); w11 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
xnn_prefetch_to_l1((const int8_t*) w8 + 128);
xnn_prefetch_to_l1((const int8_t*) w9 + 128);
xnn_prefetch_to_l1((const int8_t*) w10 + 128);
xnn_prefetch_to_l1((const int8_t*) w11 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0); w8 += 1;
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1); w9 += 1;
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2); w10 += 1;
vtmp0x89AB = vld1q_lane_u32(w11, vtmp0x89AB, 3); w11 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0); w8 += 2;
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1); w9 += 2;
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2); w10 += 2;
vtmp01x89AB = vld2q_lane_u32(w11, vtmp01x89AB, 3); w11 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0); w8 += 3;
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1); w9 += 3;
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2); w10 += 3;
vtmp012x89AB = vld3q_lane_u32(w11, vtmp012x89AB, 3); w11 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w11;
}
// NC remainder (1..11)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 11);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (12 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 12 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const uint32_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const uint32_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const uint32_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const uint32_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
xnn_prefetch_to_l1((const int8_t*) w8 + 128);
xnn_prefetch_to_l1((const int8_t*) w9 + 128);
xnn_prefetch_to_l1((const int8_t*) w10 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3);
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0);
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1);
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3);
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0);
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1);
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3);
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0);
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1);
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
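
The store pattern above implies a simple sizing rule: every tile of nr = 12 output channels consumes 12 bias words, 12 weight words per element of kc, and extra_bytes of padding, and a partial NC remainder tile consumes a full tile's worth of space. A minimal sizing helper under those assumptions follows (derived from reading the kernel's stores, not an official XNNPACK API):

#include <stddef.h>
#include <stdint.h>

// Bytes needed for one group of weights packed by the x12 kernel above.
// Assumption: 12 biases + 12*kc weights per tile, then extra_bytes of
// padding; partial remainder tiles are padded to a full tile.
static size_t example_x12_packed_goi_size(size_t nc, size_t kc,
                                          size_t extra_bytes) {
  const size_t nr = 12;                     // matches assert(nr == 12)
  const size_t tiles = (nc + nr - 1) / nr;  // full tiles + one remainder tile
  return tiles * (nr * (kc + 1) * sizeof(uint32_t) + extra_bytes);
}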
XNNPACK-master/src/x32-packw/gen/x32-packw-x12-gemm-goi-neon-ld4lane-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 12);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp0123x4567;
uint32x4x4_t vtmp0123x89AB;
do {
// NC main loop multiple of 12
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 12; n -= 12) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
uint32x4_t vb8 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
vst1q_u32(packed_weights, vb8); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
const uint32_t* w8 = w7 + kc;
const uint32_t* w9 = w8 + kc;
const uint32_t* w10 = w9 + kc;
const uint32_t* w11 = w10 + kc;
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
vtmp0123x89AB = vld4q_lane_u32(w11, vtmp0123x89AB, 3); w11 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0); w8 += 1;
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1); w9 += 1;
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2); w10 += 1;
vtmp0x89AB = vld1q_lane_u32(w11, vtmp0x89AB, 3); w11 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0); w8 += 2;
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1); w9 += 2;
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2); w10 += 2;
vtmp01x89AB = vld2q_lane_u32(w11, vtmp01x89AB, 3); w11 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0); w8 += 3;
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1); w9 += 3;
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2); w10 += 3;
vtmp012x89AB = vld3q_lane_u32(w11, vtmp012x89AB, 3); w11 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w11;
}
// NC remainder (1..11)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 11);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (12 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 12 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const uint32_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const uint32_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const uint32_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const uint32_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3);
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0);
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1);
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3);
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0);
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1);
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3);
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0);
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1);
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
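
/*
 * Illustrative caller: a minimal sketch, not part of the generated file. It
 * assumes the store pattern visible above, where each tile of NR=12 rows
 * emits 12 bias values, then KC rows of 12 weights, then skips `extra_bytes`.
 * The driver name and the sizes kG/kNc/kKc are hypothetical.
 */
static void example_pack_x12(void)
{
  enum { kG = 1, kNc = 13, kKc = 5, kNr = 12, kTiles = (kNc + kNr - 1) / kNr };
  static const uint32_t weights[kG * kNc * kKc] = { 0 };  // GOI: [g][nc][kc]
  static const uint32_t bias[kG * kNc] = { 0 };
  // One tile per 12 output channels; each tile stores 12 biases + 12*kKc weights.
  static uint32_t packed[kG * kTiles * kNr * (kKc + 1)];
  xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x4(
      kG, kNc, kKc, /*nr=*/12, /*kr=*/1, /*sr=*/1,
      weights, bias, packed, /*extra_bytes=*/0, /*params=*/NULL);
}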
| 18,381 | 44.275862 | 80 | c |
| XNNPACK | XNNPACK-master/src/x32-packw/gen/x32-packw-x16-gemm-goi-neon-ld4lane-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 16);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp0123x4567;
uint32x4x4_t vtmp0123x89AB;
uint32x4x4_t vtmp0123xCDEF;
do {
// NC main loop multiple of 16
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 16; n -= 16) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
uint32x4_t vb8 = vld1q_u32(bias); bias += 4;
uint32x4_t vb12 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
vst1q_u32(packed_weights, vb8); packed_weights += 4;
vst1q_u32(packed_weights, vb12); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
const uint32_t* w8 = w7 + kc;
const uint32_t* w9 = w8 + kc;
const uint32_t* w10 = w9 + kc;
const uint32_t* w11 = w10 + kc;
const uint32_t* w12 = w11 + kc;
const uint32_t* w13 = w12 + kc;
const uint32_t* w14 = w13 + kc;
const uint32_t* w15 = w14 + kc;
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
vtmp0123x89AB = vld4q_lane_u32(w11, vtmp0123x89AB, 3); w11 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w12, vtmp0123xCDEF, 0); w12 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w13, vtmp0123xCDEF, 1); w13 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w14, vtmp0123xCDEF, 2); w14 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w15, vtmp0123xCDEF, 3); w15 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
uint32x4_t vtmp0xCDEF = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0); w8 += 1;
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1); w9 += 1;
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2); w10 += 1;
vtmp0x89AB = vld1q_lane_u32(w11, vtmp0x89AB, 3); w11 += 1;
vtmp0xCDEF = vld1q_lane_u32(w12, vtmp0xCDEF, 0); w12 += 1;
vtmp0xCDEF = vld1q_lane_u32(w13, vtmp0xCDEF, 1); w13 += 1;
vtmp0xCDEF = vld1q_lane_u32(w14, vtmp0xCDEF, 2); w14 += 1;
vtmp0xCDEF = vld1q_lane_u32(w15, vtmp0xCDEF, 3); w15 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0xCDEF); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01xCDEF;
vtmp01xCDEF.val[0] = vdupq_n_u32(0);
vtmp01xCDEF.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0); w8 += 2;
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1); w9 += 2;
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2); w10 += 2;
vtmp01x89AB = vld2q_lane_u32(w11, vtmp01x89AB, 3); w11 += 2;
vtmp01xCDEF = vld2q_lane_u32(w12, vtmp01xCDEF, 0); w12 += 2;
vtmp01xCDEF = vld2q_lane_u32(w13, vtmp01xCDEF, 1); w13 += 2;
vtmp01xCDEF = vld2q_lane_u32(w14, vtmp01xCDEF, 2); w14 += 2;
vtmp01xCDEF = vld2q_lane_u32(w15, vtmp01xCDEF, 3); w15 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01xCDEF.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012xCDEF;
vtmp012xCDEF.val[0] = vdupq_n_u32(0);
vtmp012xCDEF.val[1] = vdupq_n_u32(0);
vtmp012xCDEF.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0); w8 += 3;
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1); w9 += 3;
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2); w10 += 3;
vtmp012x89AB = vld3q_lane_u32(w11, vtmp012x89AB, 3); w11 += 3;
vtmp012xCDEF = vld3q_lane_u32(w12, vtmp012xCDEF, 0); w12 += 3;
vtmp012xCDEF = vld3q_lane_u32(w13, vtmp012xCDEF, 1); w13 += 3;
vtmp012xCDEF = vld3q_lane_u32(w14, vtmp012xCDEF, 2); w14 += 3;
vtmp012xCDEF = vld3q_lane_u32(w15, vtmp012xCDEF, 3); w15 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w15;
}
// NC remainder (1..15)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 15);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (16 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 16 rows so last row is not loaded
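      // Each unused row pointer below is clamped to the previous row so the
      // lane loads stay within `weights`; lanes past `n` then hold duplicate
      // data that the consumer never reads. `n < 2` and `n <= 2` both express
      // the same bound, n < row + 1, for rows 1 and 2 respectively.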
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const uint32_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const uint32_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const uint32_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const uint32_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
const uint32_t* w11 = w10 + kc;
if XNN_UNPREDICTABLE(n < 12) {
w11 = w10;
}
const uint32_t* w12 = w11 + kc;
if XNN_UNPREDICTABLE(n <= 12) {
w12 = w11;
}
const uint32_t* w13 = w12 + kc;
if XNN_UNPREDICTABLE(n < 14) {
w13 = w12;
}
const uint32_t* w14 = w13 + kc;
if XNN_UNPREDICTABLE(n <= 14) {
w14 = w13;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp0123x89AB = vld4q_lane_u32(w8, vtmp0123x89AB, 0); w8 += 4;
vtmp0123x89AB = vld4q_lane_u32(w9, vtmp0123x89AB, 1); w9 += 4;
vtmp0123x89AB = vld4q_lane_u32(w10, vtmp0123x89AB, 2); w10 += 4;
vtmp0123x89AB = vld4q_lane_u32(w11, vtmp0123x89AB, 3); w11 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w12, vtmp0123xCDEF, 0); w12 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w13, vtmp0123xCDEF, 1); w13 += 4;
vtmp0123xCDEF = vld4q_lane_u32(w14, vtmp0123xCDEF, 2); w14 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x89AB.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123xCDEF.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
uint32x4_t vtmp0x89AB = vdupq_n_u32(0);
uint32x4_t vtmp0xCDEF = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3);
vtmp0x89AB = vld1q_lane_u32(w8, vtmp0x89AB, 0);
vtmp0x89AB = vld1q_lane_u32(w9, vtmp0x89AB, 1);
vtmp0x89AB = vld1q_lane_u32(w10, vtmp0x89AB, 2);
vtmp0x89AB = vld1q_lane_u32(w11, vtmp0x89AB, 3);
vtmp0xCDEF = vld1q_lane_u32(w12, vtmp0xCDEF, 0);
vtmp0xCDEF = vld1q_lane_u32(w13, vtmp0xCDEF, 1);
vtmp0xCDEF = vld1q_lane_u32(w14, vtmp0xCDEF, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x89AB); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0xCDEF); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x89AB;
vtmp01x89AB.val[0] = vdupq_n_u32(0);
vtmp01x89AB.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01xCDEF;
vtmp01xCDEF.val[0] = vdupq_n_u32(0);
vtmp01xCDEF.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3);
vtmp01x89AB = vld2q_lane_u32(w8, vtmp01x89AB, 0);
vtmp01x89AB = vld2q_lane_u32(w9, vtmp01x89AB, 1);
vtmp01x89AB = vld2q_lane_u32(w10, vtmp01x89AB, 2);
vtmp01x89AB = vld2q_lane_u32(w11, vtmp01x89AB, 3);
vtmp01xCDEF = vld2q_lane_u32(w12, vtmp01xCDEF, 0);
vtmp01xCDEF = vld2q_lane_u32(w13, vtmp01xCDEF, 1);
vtmp01xCDEF = vld2q_lane_u32(w14, vtmp01xCDEF, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01xCDEF.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x89AB;
vtmp012x89AB.val[0] = vdupq_n_u32(0);
vtmp012x89AB.val[1] = vdupq_n_u32(0);
vtmp012x89AB.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012xCDEF;
vtmp012xCDEF.val[0] = vdupq_n_u32(0);
vtmp012xCDEF.val[1] = vdupq_n_u32(0);
vtmp012xCDEF.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3);
vtmp012x89AB = vld3q_lane_u32(w8, vtmp012x89AB, 0);
vtmp012x89AB = vld3q_lane_u32(w9, vtmp012x89AB, 1);
vtmp012x89AB = vld3q_lane_u32(w10, vtmp012x89AB, 2);
vtmp012x89AB = vld3q_lane_u32(w11, vtmp012x89AB, 3);
vtmp012xCDEF = vld3q_lane_u32(w12, vtmp012xCDEF, 0);
vtmp012xCDEF = vld3q_lane_u32(w13, vtmp012xCDEF, 1);
vtmp012xCDEF = vld3q_lane_u32(w14, vtmp012xCDEF, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x89AB.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012xCDEF.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 23,759 | 46.61523 | 80 | c |
| XNNPACK | XNNPACK-master/src/x32-packw/gen/x32-packw-x16-gemm-goi-scalar-float-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x16__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 16);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 16
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 16; n -= 16) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
out[4] = b[4];
out[5] = b[5];
out[6] = b[6];
out[7] = b[7];
out[8] = b[8];
out[9] = b[9];
out[10] = b[10];
out[11] = b[11];
out[12] = b[12];
out[13] = b[13];
out[14] = b[14];
out[15] = b[15];
b += 16;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
out[4] = 0;
out[5] = 0;
out[6] = 0;
out[7] = 0;
out[8] = 0;
out[9] = 0;
out[10] = 0;
out[11] = 0;
out[12] = 0;
out[13] = 0;
out[14] = 0;
out[15] = 0;
}
out += 16;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
const float* w8 = w7 + kc;
const float* w9 = w8 + kc;
const float* w10 = w9 + kc;
const float* w11 = w10 + kc;
const float* w12 = w11 + kc;
const float* w13 = w12 + kc;
const float* w14 = w13 + kc;
const float* w15 = w14 + kc;
// KC main loop multiple of 16x4
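      // Each iteration below transposes a 16x4 block: 4 consecutive elements
      // are read from each of the 16 rows, then stored column-major so every
      // group of 16 outputs holds one k-step across all 16 rows.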
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
const float v40 = w4[0];
const float v41 = w4[1];
const float v42 = w4[2];
const float v43 = w4[3];
w4 += 4;
const float v50 = w5[0];
const float v51 = w5[1];
const float v52 = w5[2];
const float v53 = w5[3];
w5 += 4;
const float v60 = w6[0];
const float v61 = w6[1];
const float v62 = w6[2];
const float v63 = w6[3];
w6 += 4;
const float v70 = w7[0];
const float v71 = w7[1];
const float v72 = w7[2];
const float v73 = w7[3];
w7 += 4;
const float v80 = w8[0];
const float v81 = w8[1];
const float v82 = w8[2];
const float v83 = w8[3];
w8 += 4;
const float v90 = w9[0];
const float v91 = w9[1];
const float v92 = w9[2];
const float v93 = w9[3];
w9 += 4;
const float v100 = w10[0];
const float v101 = w10[1];
const float v102 = w10[2];
const float v103 = w10[3];
w10 += 4;
const float v110 = w11[0];
const float v111 = w11[1];
const float v112 = w11[2];
const float v113 = w11[3];
w11 += 4;
const float v120 = w12[0];
const float v121 = w12[1];
const float v122 = w12[2];
const float v123 = w12[3];
w12 += 4;
const float v130 = w13[0];
const float v131 = w13[1];
const float v132 = w13[2];
const float v133 = w13[3];
w13 += 4;
const float v140 = w14[0];
const float v141 = w14[1];
const float v142 = w14[2];
const float v143 = w14[3];
w14 += 4;
const float v150 = w15[0];
const float v151 = w15[1];
const float v152 = w15[2];
const float v153 = w15[3];
w15 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[15] = v150;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[31] = v151;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[47] = v152;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out[63] = v153;
out += 64;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
const float v4 = *w4++;
out[4] = v4;
const float v5 = *w5++;
out[5] = v5;
const float v6 = *w6++;
out[6] = v6;
const float v7 = *w7++;
out[7] = v7;
const float v8 = *w8++;
out[8] = v8;
const float v9 = *w9++;
out[9] = v9;
const float v10 = *w10++;
out[10] = v10;
const float v11 = *w11++;
out[11] = v11;
const float v12 = *w12++;
out[12] = v12;
const float v13 = *w13++;
out[13] = v13;
const float v14 = *w14++;
out[14] = v14;
const float v15 = *w15++;
out[15] = v15;
out += 16;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w15;
}
// NC remainder (1..15)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (16 - n);
// NR remainder has less than 16 rows so last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const float* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const float* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const float* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const float* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
const float* w11 = w10 + kc;
if XNN_UNPREDICTABLE(n < 12) {
w11 = w10;
}
const float* w12 = w11 + kc;
if XNN_UNPREDICTABLE(n <= 12) {
w12 = w11;
}
const float* w13 = w12 + kc;
if XNN_UNPREDICTABLE(n < 14) {
w13 = w12;
}
const float* w14 = w13 + kc;
if XNN_UNPREDICTABLE(n <= 14) {
w14 = w13;
}
// KC main loop multiple of 16x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
const float v40 = w4[0];
const float v41 = w4[1];
const float v42 = w4[2];
const float v43 = w4[3];
w4 += 4;
const float v50 = w5[0];
const float v51 = w5[1];
const float v52 = w5[2];
const float v53 = w5[3];
w5 += 4;
const float v60 = w6[0];
const float v61 = w6[1];
const float v62 = w6[2];
const float v63 = w6[3];
w6 += 4;
const float v70 = w7[0];
const float v71 = w7[1];
const float v72 = w7[2];
const float v73 = w7[3];
w7 += 4;
const float v80 = w8[0];
const float v81 = w8[1];
const float v82 = w8[2];
const float v83 = w8[3];
w8 += 4;
const float v90 = w9[0];
const float v91 = w9[1];
const float v92 = w9[2];
const float v93 = w9[3];
w9 += 4;
const float v100 = w10[0];
const float v101 = w10[1];
const float v102 = w10[2];
const float v103 = w10[3];
w10 += 4;
const float v110 = w11[0];
const float v111 = w11[1];
const float v112 = w11[2];
const float v113 = w11[3];
w11 += 4;
const float v120 = w12[0];
const float v121 = w12[1];
const float v122 = w12[2];
const float v123 = w12[3];
w12 += 4;
const float v130 = w13[0];
const float v131 = w13[1];
const float v132 = w13[2];
const float v133 = w13[3];
w13 += 4;
const float v140 = w14[0];
const float v141 = w14[1];
const float v142 = w14[2];
const float v143 = w14[3];
w14 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out += 64;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
const float v4 = *w4++;
out[4] = v4;
const float v5 = *w5++;
out[5] = v5;
const float v6 = *w6++;
out[6] = v6;
const float v7 = *w7++;
out[7] = v7;
const float v8 = *w8++;
out[8] = v8;
const float v9 = *w9++;
out[9] = v9;
const float v10 = *w10++;
out[10] = v10;
const float v11 = *w11++;
out[11] = v11;
const float v12 = *w12++;
out[12] = v12;
const float v13 = *w13++;
out[13] = v13;
const float v14 = *w14++;
out[14] = v14;
out += 16;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
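
/*
 * Sizing sketch: an assumption drawn from the store pattern of the GOI
 * packers above, not part of the generated source. Every tile of `nr` rows
 * holds `nr` bias values, then `kc` rows of `nr` weights, then `extra_bytes`
 * of padding. The helper name below is hypothetical.
 */
static inline size_t example_packed_goi_size(
    size_t g, size_t nc, size_t kc, size_t nr, size_t extra_bytes)
{
  const size_t tiles = (nc + nr - 1) / nr;  // nc rounded up to a multiple of nr
  const size_t tile_bytes = nr * (kc + 1) * sizeof(uint32_t) + extra_bytes;
  return g * tiles * tile_bytes;            // bytes needed for packed_weights
}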
| 13,949 | 24.502742 | 72 | c |
| XNNPACK | XNNPACK-master/src/x32-packw/gen/x32-packw-x16-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x16__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 16);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32_t* out = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 16
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
    for (; n >= 16; n -= 16) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
out[4] = b[4];
out[5] = b[5];
out[6] = b[6];
out[7] = b[7];
out[8] = b[8];
out[9] = b[9];
out[10] = b[10];
out[11] = b[11];
out[12] = b[12];
out[13] = b[13];
out[14] = b[14];
out[15] = b[15];
b += 16;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
out[4] = 0;
out[5] = 0;
out[6] = 0;
out[7] = 0;
out[8] = 0;
out[9] = 0;
out[10] = 0;
out[11] = 0;
out[12] = 0;
out[13] = 0;
out[14] = 0;
out[15] = 0;
}
out += 16;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
const uint32_t* w8 = w7 + kc;
const uint32_t* w9 = w8 + kc;
const uint32_t* w10 = w9 + kc;
const uint32_t* w11 = w10 + kc;
const uint32_t* w12 = w11 + kc;
const uint32_t* w13 = w12 + kc;
const uint32_t* w14 = w13 + kc;
const uint32_t* w15 = w14 + kc;
// KC main loop multiple of 16x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
const uint32_t v30 = w3[0];
const uint32_t v31 = w3[1];
const uint32_t v32 = w3[2];
const uint32_t v33 = w3[3];
w3 += 4;
const uint32_t v40 = w4[0];
const uint32_t v41 = w4[1];
const uint32_t v42 = w4[2];
const uint32_t v43 = w4[3];
w4 += 4;
const uint32_t v50 = w5[0];
const uint32_t v51 = w5[1];
const uint32_t v52 = w5[2];
const uint32_t v53 = w5[3];
w5 += 4;
const uint32_t v60 = w6[0];
const uint32_t v61 = w6[1];
const uint32_t v62 = w6[2];
const uint32_t v63 = w6[3];
w6 += 4;
const uint32_t v70 = w7[0];
const uint32_t v71 = w7[1];
const uint32_t v72 = w7[2];
const uint32_t v73 = w7[3];
w7 += 4;
const uint32_t v80 = w8[0];
const uint32_t v81 = w8[1];
const uint32_t v82 = w8[2];
const uint32_t v83 = w8[3];
w8 += 4;
const uint32_t v90 = w9[0];
const uint32_t v91 = w9[1];
const uint32_t v92 = w9[2];
const uint32_t v93 = w9[3];
w9 += 4;
const uint32_t v100 = w10[0];
const uint32_t v101 = w10[1];
const uint32_t v102 = w10[2];
const uint32_t v103 = w10[3];
w10 += 4;
const uint32_t v110 = w11[0];
const uint32_t v111 = w11[1];
const uint32_t v112 = w11[2];
const uint32_t v113 = w11[3];
w11 += 4;
const uint32_t v120 = w12[0];
const uint32_t v121 = w12[1];
const uint32_t v122 = w12[2];
const uint32_t v123 = w12[3];
w12 += 4;
const uint32_t v130 = w13[0];
const uint32_t v131 = w13[1];
const uint32_t v132 = w13[2];
const uint32_t v133 = w13[3];
w13 += 4;
const uint32_t v140 = w14[0];
const uint32_t v141 = w14[1];
const uint32_t v142 = w14[2];
const uint32_t v143 = w14[3];
w14 += 4;
const uint32_t v150 = w15[0];
const uint32_t v151 = w15[1];
const uint32_t v152 = w15[2];
const uint32_t v153 = w15[3];
w15 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[15] = v150;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[31] = v151;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[47] = v152;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out[63] = v153;
out += 64;
}
// KC remainder
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
const uint32_t v3 = *w3++;
out[3] = v3;
const uint32_t v4 = *w4++;
out[4] = v4;
const uint32_t v5 = *w5++;
out[5] = v5;
const uint32_t v6 = *w6++;
out[6] = v6;
const uint32_t v7 = *w7++;
out[7] = v7;
const uint32_t v8 = *w8++;
out[8] = v8;
const uint32_t v9 = *w9++;
out[9] = v9;
const uint32_t v10 = *w10++;
out[10] = v10;
const uint32_t v11 = *w11++;
out[11] = v11;
const uint32_t v12 = *w12++;
out[12] = v12;
const uint32_t v13 = *w13++;
out[13] = v13;
const uint32_t v14 = *w14++;
out[14] = v14;
const uint32_t v15 = *w15++;
out[15] = v15;
out += 16;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
w0 = w15;
}
// NC remainder (1..15)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (16 - n);
// NR remainder has less than 16 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const uint32_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const uint32_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const uint32_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const uint32_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
const uint32_t* w11 = w10 + kc;
if XNN_UNPREDICTABLE(n < 12) {
w11 = w10;
}
const uint32_t* w12 = w11 + kc;
if XNN_UNPREDICTABLE(n <= 12) {
w12 = w11;
}
const uint32_t* w13 = w12 + kc;
if XNN_UNPREDICTABLE(n < 14) {
w13 = w12;
}
const uint32_t* w14 = w13 + kc;
if XNN_UNPREDICTABLE(n <= 14) {
w14 = w13;
}
// KC main loop multiple of 16x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
const uint32_t v30 = w3[0];
const uint32_t v31 = w3[1];
const uint32_t v32 = w3[2];
const uint32_t v33 = w3[3];
w3 += 4;
const uint32_t v40 = w4[0];
const uint32_t v41 = w4[1];
const uint32_t v42 = w4[2];
const uint32_t v43 = w4[3];
w4 += 4;
const uint32_t v50 = w5[0];
const uint32_t v51 = w5[1];
const uint32_t v52 = w5[2];
const uint32_t v53 = w5[3];
w5 += 4;
const uint32_t v60 = w6[0];
const uint32_t v61 = w6[1];
const uint32_t v62 = w6[2];
const uint32_t v63 = w6[3];
w6 += 4;
const uint32_t v70 = w7[0];
const uint32_t v71 = w7[1];
const uint32_t v72 = w7[2];
const uint32_t v73 = w7[3];
w7 += 4;
const uint32_t v80 = w8[0];
const uint32_t v81 = w8[1];
const uint32_t v82 = w8[2];
const uint32_t v83 = w8[3];
w8 += 4;
const uint32_t v90 = w9[0];
const uint32_t v91 = w9[1];
const uint32_t v92 = w9[2];
const uint32_t v93 = w9[3];
w9 += 4;
const uint32_t v100 = w10[0];
const uint32_t v101 = w10[1];
const uint32_t v102 = w10[2];
const uint32_t v103 = w10[3];
w10 += 4;
const uint32_t v110 = w11[0];
const uint32_t v111 = w11[1];
const uint32_t v112 = w11[2];
const uint32_t v113 = w11[3];
w11 += 4;
const uint32_t v120 = w12[0];
const uint32_t v121 = w12[1];
const uint32_t v122 = w12[2];
const uint32_t v123 = w12[3];
w12 += 4;
const uint32_t v130 = w13[0];
const uint32_t v131 = w13[1];
const uint32_t v132 = w13[2];
const uint32_t v133 = w13[3];
w13 += 4;
const uint32_t v140 = w14[0];
const uint32_t v141 = w14[1];
const uint32_t v142 = w14[2];
const uint32_t v143 = w14[3];
w14 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v80;
out[9] = v90;
out[10] = v100;
out[11] = v110;
out[12] = v120;
out[13] = v130;
out[14] = v140;
out[16] = v01;
out[17] = v11;
out[18] = v21;
out[19] = v31;
out[20] = v41;
out[21] = v51;
out[22] = v61;
out[23] = v71;
out[24] = v81;
out[25] = v91;
out[26] = v101;
out[27] = v111;
out[28] = v121;
out[29] = v131;
out[30] = v141;
out[32] = v02;
out[33] = v12;
out[34] = v22;
out[35] = v32;
out[36] = v42;
out[37] = v52;
out[38] = v62;
out[39] = v72;
out[40] = v82;
out[41] = v92;
out[42] = v102;
out[43] = v112;
out[44] = v122;
out[45] = v132;
out[46] = v142;
out[48] = v03;
out[49] = v13;
out[50] = v23;
out[51] = v33;
out[52] = v43;
out[53] = v53;
out[54] = v63;
out[55] = v73;
out[56] = v83;
out[57] = v93;
out[58] = v103;
out[59] = v113;
out[60] = v123;
out[61] = v133;
out[62] = v143;
out += 64;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
const uint32_t v3 = *w3++;
out[3] = v3;
const uint32_t v4 = *w4++;
out[4] = v4;
const uint32_t v5 = *w5++;
out[5] = v5;
const uint32_t v6 = *w6++;
out[6] = v6;
const uint32_t v7 = *w7++;
out[7] = v7;
const uint32_t v8 = *w8++;
out[8] = v8;
const uint32_t v9 = *w9++;
out[9] = v9;
const uint32_t v10 = *w10++;
out[10] = v10;
const uint32_t v11 = *w11++;
out[11] = v11;
const uint32_t v12 = *w12++;
out[12] = v12;
const uint32_t v13 = *w13++;
out[13] = v13;
const uint32_t v14 = *w14++;
out[14] = v14;
out += 16;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 14,523 | 25.552102 | 72 | c |
| XNNPACK | XNNPACK-master/src/x32-packw/gen/x32-packw-x2-gemm-goi-neon-ld2lane-x2-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/NR2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x2__neon_ld2lane_x2_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x2x2_t v00;
do {
// NC main loop multiple of 2
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 2; n -= 2) {
if XNN_LIKELY(bias != NULL) {
uint32x2_t vb0 = vld1_u32(bias); bias += 2;
vst1_u32(packed_weights, vb0); packed_weights += 2;
} else {
const uint32x2_t vzero = vmov_n_u32(0);
vst1_u32(packed_weights, vzero); packed_weights += 2;
}
const uint32_t* w1 = w0 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
// KC main loop multiple of 2
size_t k = kc;
for (; k >= 2; k -= 2) {
v00 = vld2_lane_u32(w0, v00, 0); w0 += 2;
v00 = vld2_lane_u32(w1, v00, 1); w1 += 2;
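        // Keep the hardware prefetcher roughly 128 bytes ahead of both read
        // streams (an assumption: about two cache lines) so the lane loads
        // above rarely stall on memory.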
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
vst1_u32(packed_weights + 0, v00.val[0]);
vst1_u32(packed_weights + 2, v00.val[1]);
packed_weights += 4;
}
// KC remainder
for (; k != 0; --k) {
v00.val[0] = vld1_lane_u32(w0, v00.val[0], 0); w0 += 1;
v00.val[0] = vld1_lane_u32(w1, v00.val[0], 1); w1 += 1;
vst1_u32(packed_weights + 0, v00.val[0]);
packed_weights += 2;
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w1;
}
if XNN_UNLIKELY(n != 0) {
// NC remainder of 1
if XNN_LIKELY(bias != NULL) {
*packed_weights = *bias++;
} else {
const uint32x2_t vzero = vmov_n_u32(0);
vst1_u32(packed_weights + 0, vzero);
}
packed_weights += 2;
size_t k = kc;
do {
*packed_weights = *w0++;
packed_weights += 2;
} while (--k);
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 2,838 | 25.53271 | 78 | c |
| XNNPACK | XNNPACK-master/src/x32-packw/gen/x32-packw-x2-gemm-goi-neon-ld2lane-x2.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/NR2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x2__neon_ld2lane_x2(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x2x2_t v00;
do {
// NC main loop multiple of 2
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 2; n -= 2) {
if XNN_LIKELY(bias != NULL) {
uint32x2_t vb0 = vld1_u32(bias); bias += 2;
vst1_u32(packed_weights, vb0); packed_weights += 2;
} else {
const uint32x2_t vzero = vmov_n_u32(0);
vst1_u32(packed_weights, vzero); packed_weights += 2;
}
const uint32_t* w1 = w0 + kc;
// KC main loop multiple of 2
size_t k = kc;
for (; k >= 2; k -= 2) {
v00 = vld2_lane_u32(w0, v00, 0); w0 += 2;
v00 = vld2_lane_u32(w1, v00, 1); w1 += 2;
vst1_u32(packed_weights + 0, v00.val[0]);
vst1_u32(packed_weights + 2, v00.val[1]);
packed_weights += 4;
}
// KC remainder
for (; k != 0; --k) {
v00.val[0] = vld1_lane_u32(w0, v00.val[0], 0); w0 += 1;
v00.val[0] = vld1_lane_u32(w1, v00.val[0], 1); w1 += 1;
vst1_u32(packed_weights + 0, v00.val[0]);
packed_weights += 2;
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w1;
}
if XNN_UNLIKELY(n != 0) {
// NC remainder of 1
if XNN_LIKELY(bias != NULL) {
*packed_weights = *bias++;
} else {
const uint32x2_t vzero = vmov_n_u32(0);
vst1_u32(packed_weights + 0, vzero);
}
packed_weights += 2;
size_t k = kc;
do {
*packed_weights = *w0++;
packed_weights += 2;
} while (--k);
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x2-gemm-goi-scalar-float-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x2__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 2
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
b += 2;
} else {
out[0] = 0;
out[1] = 0;
}
out += 2;
const float* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v01;
out[3] = v11;
out[4] = v02;
out[5] = v12;
out[6] = v03;
out[7] = v13;
out += 8;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
out += 2;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (2 - n);
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
out[0] = v00;
out[2] = v01;
out[4] = v02;
out[6] = v03;
out += 8;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
out += 2;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x2-gemm-goi-scalar-int-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x2__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32_t* out = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 2
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
    for (; n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
b += 2;
} else {
out[0] = 0;
out[1] = 0;
}
out += 2;
const uint32_t* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v01;
out[3] = v11;
out[4] = v02;
out[5] = v12;
out[6] = v03;
out[7] = v13;
out += 8;
}
// KC remainder
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
out += 2;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (2 - n);
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
out[0] = v00;
out[2] = v01;
out[4] = v02;
out[6] = v03;
out += 8;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
out += 2;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
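// Minimal call sketch for the scalar kernel above (assumptions: bias may be
// NULL, extra_bytes == 0, and the packed buffer holds ceil(nc / nr) tiles of
// nr * (1 + kc) elements -- here 2 tiles of 2 * 6 uint32_t):
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/packw.h>

int main(void) {
  uint32_t weights[3 * 5] = {0};     // nc = 3 rows of kc = 5 weights each
  uint32_t packed[2 * 2 * (1 + 5)];  // 2 tiles, each nr * (bias + kc)
  xnn_x32_packw_gemm_goi_ukernel_x2__scalar_int_x4(
      /*g=*/1, /*nc=*/3, /*kc=*/5, /*nr=*/2, /*kr=*/1, /*sr=*/1,
      weights, /*bias=*/NULL, packed, /*extra_bytes=*/0, /*params=*/NULL);
  return 0;
}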
// XNNPACK-master/src/x32-packw/gen/x32-packw-x2c4-gemm-goi-sse2-x4-prfm.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/c4-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x2c4__sse2_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 4);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
__m128 v0;
__m128 v1;
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 2
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
packed_w[0] = b[0];
packed_w[1] = b[1];
b += 2;
} else {
packed_w[0] = 0.0f;
packed_w[1] = 0.0f;
}
packed_w += 2;
const float* w1 = w0 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
// Read blocks of 2x4
// a b c d
// e f g h
v0 = _mm_loadu_ps(w0);
w0 += 4;
v1 = _mm_loadu_ps(w1);
w1 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v1);
packed_w += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
// Read blocks of 2x1
// a
// e
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
break;
case 2:
// Read blocks of 2x2
// a b
// e f
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
break;
case 3:
{
// Read blocks of 2x3
// a b c
// e f g
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v1);
packed_w += 8;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 1);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (2 - n);
} else {
packed_w[0] = 0.0f;
packed_w[1] = 0.0f;
packed_w += 2;
}
// NR remainder has less than 2 rows so last row is not loaded
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
        // Read blocks of 1x4
        // a b c d
v0 = _mm_loadu_ps(w0);
w0 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v0);
packed_w += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
// Read blocks of 1x1
// a
v0 = _mm_load_ss(w0);
w0 += 1;
break;
case 2:
// Read blocks of 1x2
// a b
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
break;
case 3:
{
// Read blocks of 1x3
// a b c
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v0);
packed_w += 8;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
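// With kr == 4 (the "c4" kernels here), a tile stores 4 consecutive k values
// per row instead of interleaving rows per single k. A scalar sketch of that
// layout, assuming nc is a multiple of 2, extra_bytes == 0, and zero padding
// for the final partial k block (matching the _mm_load_ss / _mm_load_sd
// remainder paths above, which zero the unused lanes):
#include <assert.h>
#include <stddef.h>

static void ref_packw_x2c4(size_t nc, size_t kc,
                           const float* w, const float* b, float* out)
{
  assert(nc % 2 == 0);  // sketch covers full tiles only
  for (size_t n = 0; n < nc; n += 2) {
    out[0] = (b != NULL) ? b[n + 0] : 0.0f;  // tile header: nr bias values
    out[1] = (b != NULL) ? b[n + 1] : 0.0f;
    out += 2;
    for (size_t k = 0; k < kc; k += 4) {
      for (size_t i = 0; i < 2; i++) {       // row within the tile
        for (size_t kk = 0; kk < 4; kk++) {  // 4 consecutive k per row
          *out++ = (k + kk < kc) ? w[(n + i) * kc + (k + kk)] : 0.0f;
        }
      }
    }
  }
}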
// XNNPACK-master/src/x32-packw/gen/x32-packw-x2c4-gemm-goi-sse2-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/c4-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x2c4__sse2_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 4);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
__m128 v0;
__m128 v1;
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 2
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
packed_w[0] = b[0];
packed_w[1] = b[1];
b += 2;
} else {
packed_w[0] = 0.0f;
packed_w[1] = 0.0f;
}
packed_w += 2;
const float* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
// Read blocks of 2x4
// a b c d
// e f g h
v0 = _mm_loadu_ps(w0);
w0 += 4;
v1 = _mm_loadu_ps(w1);
w1 += 4;
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v1);
packed_w += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
// Read blocks of 2x1
// a
// e
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
break;
case 2:
// Read blocks of 2x2
// a b
// e f
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
break;
case 3:
{
// Read blocks of 2x3
// a b c
// e f g
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v1);
packed_w += 8;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 1);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (2 - n);
} else {
packed_w[0] = 0.0f;
packed_w[1] = 0.0f;
packed_w += 2;
}
// NR remainder has less than 2 rows so last row is not loaded
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
        // Read blocks of 1x4
        // a b c d
v0 = _mm_loadu_ps(w0);
w0 += 4;
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v0);
packed_w += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
// Read blocks of 1x1
// a
v0 = _mm_load_ss(w0);
w0 += 1;
break;
case 2:
// Read blocks of 1x2
// a b
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
break;
case 3:
{
// Read blocks of 1x3
// a b c
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
_mm_storeu_ps(packed_w, v0);
_mm_storeu_ps(packed_w + 4, v0);
packed_w += 8;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x2c4-gemm-goi-wasmsimd-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x2c4__wasmsimd_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 4);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
do {
// NC main loop multiple of 2
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
for (; n >= 2; n -= 2) {
if XNN_LIKELY(bias != NULL) {
packed_weights[0] = bias[0];
packed_weights[1] = bias[1];
bias += 2;
} else {
packed_weights[0] = 0;
packed_weights[1] = 0;
}
packed_weights += 2;
const uint32_t* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
// Read blocks of 2x4
// a b c d
// e f g h
const v128_t v0 = wasm_v128_load(w0);
w0 += 4;
const v128_t v1 = wasm_v128_load(w1);
w1 += 4;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v1);
packed_weights += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
// Read blocks of 2x1
// a
// e
const v128_t v0 = wasm_v128_load32_zero(w0);
++w0;
const v128_t v1 = wasm_v128_load32_zero(w1);
++w1;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v1);
packed_weights += 8;
break;
}
case 2:
{
// Read blocks of 2x2
// a b
// e f
const v128_t v0 = wasm_v128_load64_zero(w0);
w0 += 2;
const v128_t v1 = wasm_v128_load64_zero(w1);
w1 += 2;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v1);
packed_weights += 8;
break;
}
case 3:
{
// Read blocks of 2x3
// a b c
// e f g
v128_t v0 = wasm_v128_load64_zero(w0);
v0 = wasm_v128_load32_lane(w0 + 2, v0, 2);
w0 += 3;
v128_t v1 = wasm_v128_load64_zero(w1);
v1 = wasm_v128_load32_lane(w1 + 2, v1, 2);
w1 += 3;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v1);
packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 1);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (2 - n);
} else {
packed_weights[0] = 0;
packed_weights[1] = 0;
packed_weights += 2;
}
// NR remainder has less than 2 rows so last row is not loaded
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
        // Read blocks of 1x4
        // a b c d
const v128_t v0 = wasm_v128_load(w0);
w0 += 4;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v0);
packed_weights += 8;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
// Read blocks of 1x1
// a
const v128_t v0 = wasm_v128_load32_zero(w0);
++w0;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v0);
packed_weights += 8;
break;
}
case 2:
{
// Read blocks of 1x2
// a b
const v128_t v0 = wasm_v128_load64_zero(w0);
w0 += 2;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v0);
packed_weights += 8;
break;
}
case 3:
{
// Read blocks of 1x3
// a b c
v128_t v0 = wasm_v128_load64_zero(w0);
v0 = wasm_v128_load32_lane(w0 + 2, v0, 2);
w0 += 3;
wasm_v128_store(packed_weights, v0);
wasm_v128_store(packed_weights + 4, v0);
packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
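// Hypothetical sizing helper for the buffers these packw kernels fill -- not
// an XNNPACK API, just the arithmetic implied by the layouts above:
// ceil(nc / nr) tiles per group, each with nr bias slots, nr rows of kc
// values rounded up to a multiple of kr, plus extra_bytes.
#include <stddef.h>
#include <stdint.h>

static size_t packed_weights_bytes_u32(size_t g, size_t nc, size_t kc,
                                       size_t nr, size_t kr,
                                       size_t extra_bytes)
{
  const size_t kc_padded = (kc + kr - 1) / kr * kr;  // round kc up to kr
  const size_t tiles = (nc + nr - 1) / nr;           // tiles per group
  const size_t tile_bytes =
      nr * (1 + kc_padded) * sizeof(uint32_t) + extra_bytes;
  return g * tiles * tile_bytes;
}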
// XNNPACK-master/src/x32-packw/gen/x32-packw-x3-gemm-goi-scalar-float-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x3__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 3);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 3
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 3; n -= 3) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
b += 3;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
}
out += 3;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
// KC main loop multiple of 3x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v01;
out[4] = v11;
out[5] = v21;
out[6] = v02;
out[7] = v12;
out[8] = v22;
out[9] = v03;
out[10] = v13;
out[11] = v23;
out += 12;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
out += 3;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w2;
}
// NC remainder (1..2)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (3 - n);
// NR remainder has less than 3 rows so last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
// KC main loop multiple of 3x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[3] = v01;
out[4] = v11;
out[6] = v02;
out[7] = v12;
out[9] = v03;
out[10] = v13;
out += 12;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
out += 3;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x3-gemm-goi-scalar-int-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x3__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 3);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32_t* out = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 3
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
    for (; n >= 3; n -= 3) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
b += 3;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
}
out += 3;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
// KC main loop multiple of 3x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v01;
out[4] = v11;
out[5] = v21;
out[6] = v02;
out[7] = v12;
out[8] = v22;
out[9] = v03;
out[10] = v13;
out[11] = v23;
out += 12;
}
// KC remainder
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
out += 3;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
w0 = w2;
}
// NC remainder (1..2)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (3 - n);
// NR remainder has less than 3 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
// KC main loop multiple of 3x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[3] = v01;
out[4] = v11;
out[6] = v02;
out[7] = v12;
out[9] = v03;
out[10] = v13;
out += 12;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
out += 3;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x4-gemm-goi-scalar-float-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x4__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 4);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 4
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 4; n -= 4) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
b += 4;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
}
out += 4;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[7] = v31;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[11] = v32;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out[15] = v33;
out += 16;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
out += 4;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w3;
}
// NC remainder (1..3)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (4 - n);
// NR remainder has less than 4 rows so last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out += 16;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
out += 4;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x4-gemm-goi-scalar-int-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x4__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 4);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32_t* out = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 4
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
    for (; n >= 4; n -= 4) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
b += 4;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
}
out += 4;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
const uint32_t v30 = w3[0];
const uint32_t v31 = w3[1];
const uint32_t v32 = w3[2];
const uint32_t v33 = w3[3];
w3 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[7] = v31;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[11] = v32;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out[15] = v33;
out += 16;
}
// KC remainder
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
const uint32_t v3 = *w3++;
out[3] = v3;
out += 4;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
w0 = w3;
}
// NC remainder (1..3)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (4 - n);
// NR remainder has less than 4 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out += 16;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
out += 4;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
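// The NC remainder paths in the x3/x4 kernels above clamp missing row
// pointers onto the previous valid row (the XNN_UNPREDICTABLE branches), so
// every load stays in bounds and the duplicated lanes are never consumed by
// the GEMM. A standalone illustration of that trick for an nr == 4 tile,
// where the last row is never loaded and only rows 1 and 2 need clamping:
#include <stddef.h>
#include <stdint.h>

static void clamp_row_pointers_x4(const uint32_t* w0, size_t kc, size_t n,
                                  const uint32_t* rows[3])
{
  rows[0] = w0;                                  // row 0 is always present
  rows[1] = (n < 2) ? rows[0] : rows[0] + kc;    // alias row 0 if absent
  rows[2] = (n <= 2) ? rows[1] : rows[1] + kc;   // alias row 1 if absent
}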
// XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-neon-ld4lane-x4-prfm.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp0123x4567;
do {
// NC main loop multiple of 8
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 8 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
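// The KC main loops above use vld4q_lane_u32 as an in-register 4x4
// transpose: lane i of val[0..3] receives the 4 consecutive k values of row
// i, so each val[k] ends up holding one value per row for a fixed k --
// exactly the panel order the vst1q_u32 sequence writes out. A scalar
// equivalent of one such load group:
#include <stdint.h>

static void transpose4x4_u32(const uint32_t* r0, const uint32_t* r1,
                             const uint32_t* r2, const uint32_t* r3,
                             uint32_t val[4][4])
{
  const uint32_t* rows[4] = { r0, r1, r2, r3 };
  for (int k = 0; k < 4; k++) {     // val[k] mirrors vtmp0123x0123.val[k]
    for (int i = 0; i < 4; i++) {
      val[k][i] = rows[i][k];       // lane i <- k-th value of row i
    }
  }
}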
// XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-neon-ld4lane-x4.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp0123x4567;
do {
// NC main loop multiple of 8
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 8 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
// XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-neon-ld4lane-x8-prfm.c
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp4567x0123;
uint32x4x4_t vtmp0123x4567;
uint32x4x4_t vtmp4567x4567;
do {
// NC main loop multiple of 8
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp4567x0123 = vld4q_lane_u32(w0, vtmp4567x0123, 0); w0 += 4;
vtmp4567x0123 = vld4q_lane_u32(w1, vtmp4567x0123, 1); w1 += 4;
vtmp4567x0123 = vld4q_lane_u32(w2, vtmp4567x0123, 2); w2 += 4;
vtmp4567x0123 = vld4q_lane_u32(w3, vtmp4567x0123, 3); w3 += 4;
vtmp4567x4567 = vld4q_lane_u32(w4, vtmp4567x4567, 0); w4 += 4;
vtmp4567x4567 = vld4q_lane_u32(w5, vtmp4567x4567, 1); w5 += 4;
vtmp4567x4567 = vld4q_lane_u32(w6, vtmp4567x4567, 2); w6 += 4;
vtmp4567x4567 = vld4q_lane_u32(w7, vtmp4567x4567, 3); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[3]); packed_weights += 4;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 8 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp4567x0123 = vld4q_lane_u32(w0, vtmp4567x0123, 0); w0 += 4;
vtmp4567x0123 = vld4q_lane_u32(w1, vtmp4567x0123, 1); w1 += 4;
vtmp4567x0123 = vld4q_lane_u32(w2, vtmp4567x0123, 2); w2 += 4;
vtmp4567x0123 = vld4q_lane_u32(w3, vtmp4567x0123, 3); w3 += 4;
vtmp4567x4567 = vld4q_lane_u32(w4, vtmp4567x4567, 0); w4 += 4;
vtmp4567x4567 = vld4q_lane_u32(w5, vtmp4567x4567, 1); w5 += 4;
vtmp4567x4567 = vld4q_lane_u32(w6, vtmp4567x4567, 2); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[3]); packed_weights += 4;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
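// Editor's note: a hypothetical call-site sketch (not from XNNPACK) showing
// the storage the x8 layout needs and how the ukernel above might be invoked.
// extra_bytes is 0 here; real callers may reserve per-block space for
// additional per-channel parameters.
#include <stdlib.h>

void pack_example(size_t g, size_t nc, size_t kc,
                  const uint32_t* weights, const uint32_t* bias)
{
  const size_t nr = 8;
  const size_t nc_blocks = (nc + nr - 1) / nr;
  // Per block: nr bias values plus nr*kc weights, all uint32_t.
  const size_t packed_size = g * nc_blocks * nr * (1 + kc) * sizeof(uint32_t);
  uint32_t* packed = (uint32_t*) malloc(packed_size);
  if (packed == NULL) return;
  xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8_prfm(
      g, nc, kc, /*nr=*/8, /*kr=*/1, /*sr=*/1,
      weights, bias, packed, /*extra_bytes=*/0, /*params=*/NULL);
  // ... hand `packed` to the matching 8-wide GEMM microkernel ...
  free(packed);
}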
| file_length: 20,324 | avg_line_length: 46.048611 | max_line_length: 80 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-neon-ld4lane-x8.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32x4x4_t vtmp0123x0123;
uint32x4x4_t vtmp4567x0123;
uint32x4x4_t vtmp0123x4567;
uint32x4x4_t vtmp4567x4567;
do {
// NC main loop multiple of 8
const uint32_t* w0 = weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
uint32x4_t vb0 = vld1q_u32(bias); bias += 4;
uint32x4_t vb4 = vld1q_u32(bias); bias += 4;
vst1q_u32(packed_weights, vb0); packed_weights += 4;
vst1q_u32(packed_weights, vb4); packed_weights += 4;
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vtmp4567x0123 = vld4q_lane_u32(w0, vtmp4567x0123, 0); w0 += 4;
vtmp4567x0123 = vld4q_lane_u32(w1, vtmp4567x0123, 1); w1 += 4;
vtmp4567x0123 = vld4q_lane_u32(w2, vtmp4567x0123, 2); w2 += 4;
vtmp4567x0123 = vld4q_lane_u32(w3, vtmp4567x0123, 3); w3 += 4;
vtmp4567x4567 = vld4q_lane_u32(w4, vtmp4567x4567, 0); w4 += 4;
vtmp4567x4567 = vld4q_lane_u32(w5, vtmp4567x4567, 1); w5 += 4;
vtmp4567x4567 = vld4q_lane_u32(w6, vtmp4567x4567, 2); w6 += 4;
vtmp4567x4567 = vld4q_lane_u32(w7, vtmp4567x4567, 3); w7 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[3]); packed_weights += 4;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp0123x4567 = vld4q_lane_u32(w7, vtmp0123x4567, 3); w7 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0); w0 += 1;
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1); w1 += 1;
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2); w2 += 1;
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3); w3 += 1;
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0); w4 += 1;
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1); w5 += 1;
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2); w6 += 1;
vtmp0x4567 = vld1q_lane_u32(w7, vtmp0x4567, 3); w7 += 1;
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0); w0 += 2;
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1); w1 += 2;
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2); w2 += 2;
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3); w3 += 2;
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0); w4 += 2;
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1); w5 += 2;
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2); w6 += 2;
vtmp01x4567 = vld2q_lane_u32(w7, vtmp01x4567, 3); w7 += 2;
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0); w0 += 3;
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1); w1 += 3;
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2); w2 += 3;
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3); w3 += 3;
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0); w4 += 3;
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1); w5 += 3;
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2); w6 += 3;
vtmp012x4567 = vld3q_lane_u32(w7, vtmp012x4567, 3); w7 += 3;
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const uint32x4_t vzero = vmovq_n_u32(0);
vst1q_u32(packed_weights, vzero); packed_weights += 4;
vst1q_u32(packed_weights, vzero); packed_weights += 4;
}
// NR remainder has less than 8 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8
size_t k = kc;
for (; k >= 8; k -= 8) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vtmp4567x0123 = vld4q_lane_u32(w0, vtmp4567x0123, 0); w0 += 4;
vtmp4567x0123 = vld4q_lane_u32(w1, vtmp4567x0123, 1); w1 += 4;
vtmp4567x0123 = vld4q_lane_u32(w2, vtmp4567x0123, 2); w2 += 4;
vtmp4567x0123 = vld4q_lane_u32(w3, vtmp4567x0123, 3); w3 += 4;
vtmp4567x4567 = vld4q_lane_u32(w4, vtmp4567x4567, 0); w4 += 4;
vtmp4567x4567 = vld4q_lane_u32(w5, vtmp4567x4567, 1); w5 += 4;
vtmp4567x4567 = vld4q_lane_u32(w6, vtmp4567x4567, 2); w6 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp4567x4567.val[3]); packed_weights += 4;
}
// KC remainder multiple of 4
if (k >= 4) {
vtmp0123x0123 = vld4q_lane_u32(w0, vtmp0123x0123, 0); w0 += 4;
vtmp0123x0123 = vld4q_lane_u32(w1, vtmp0123x0123, 1); w1 += 4;
vtmp0123x0123 = vld4q_lane_u32(w2, vtmp0123x0123, 2); w2 += 4;
vtmp0123x0123 = vld4q_lane_u32(w3, vtmp0123x0123, 3); w3 += 4;
vtmp0123x4567 = vld4q_lane_u32(w4, vtmp0123x4567, 0); w4 += 4;
vtmp0123x4567 = vld4q_lane_u32(w5, vtmp0123x4567, 1); w5 += 4;
vtmp0123x4567 = vld4q_lane_u32(w6, vtmp0123x4567, 2); w6 += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x0123.val[3]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0123x4567.val[3]); packed_weights += 4;
k -= 4;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 1
case 1:
{
uint32x4_t vtmp0x0123 = vdupq_n_u32(0);
uint32x4_t vtmp0x4567 = vdupq_n_u32(0);
vtmp0x0123 = vld1q_lane_u32(w0, vtmp0x0123, 0);
vtmp0x0123 = vld1q_lane_u32(w1, vtmp0x0123, 1);
vtmp0x0123 = vld1q_lane_u32(w2, vtmp0x0123, 2);
vtmp0x0123 = vld1q_lane_u32(w3, vtmp0x0123, 3);
vtmp0x4567 = vld1q_lane_u32(w4, vtmp0x4567, 0);
vtmp0x4567 = vld1q_lane_u32(w5, vtmp0x4567, 1);
vtmp0x4567 = vld1q_lane_u32(w6, vtmp0x4567, 2);
vst1q_u32(packed_weights, vtmp0x0123); packed_weights += 4;
vst1q_u32(packed_weights, vtmp0x4567); packed_weights += 4;
break;
}
// KC remainder of 2
case 2:
{
uint32x4x2_t vtmp01x0123;
vtmp01x0123.val[0] = vdupq_n_u32(0);
vtmp01x0123.val[1] = vdupq_n_u32(0);
uint32x4x2_t vtmp01x4567;
vtmp01x4567.val[0] = vdupq_n_u32(0);
vtmp01x4567.val[1] = vdupq_n_u32(0);
vtmp01x0123 = vld2q_lane_u32(w0, vtmp01x0123, 0);
vtmp01x0123 = vld2q_lane_u32(w1, vtmp01x0123, 1);
vtmp01x0123 = vld2q_lane_u32(w2, vtmp01x0123, 2);
vtmp01x0123 = vld2q_lane_u32(w3, vtmp01x0123, 3);
vtmp01x4567 = vld2q_lane_u32(w4, vtmp01x4567, 0);
vtmp01x4567 = vld2q_lane_u32(w5, vtmp01x4567, 1);
vtmp01x4567 = vld2q_lane_u32(w6, vtmp01x4567, 2);
vst1q_u32(packed_weights, vtmp01x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp01x4567.val[1]); packed_weights += 4;
break;
}
// KC remainder of 3
case 3:
{
uint32x4x3_t vtmp012x0123;
vtmp012x0123.val[0] = vdupq_n_u32(0);
vtmp012x0123.val[1] = vdupq_n_u32(0);
vtmp012x0123.val[2] = vdupq_n_u32(0);
uint32x4x3_t vtmp012x4567;
vtmp012x4567.val[0] = vdupq_n_u32(0);
vtmp012x4567.val[1] = vdupq_n_u32(0);
vtmp012x4567.val[2] = vdupq_n_u32(0);
vtmp012x0123 = vld3q_lane_u32(w0, vtmp012x0123, 0);
vtmp012x0123 = vld3q_lane_u32(w1, vtmp012x0123, 1);
vtmp012x0123 = vld3q_lane_u32(w2, vtmp012x0123, 2);
vtmp012x0123 = vld3q_lane_u32(w3, vtmp012x0123, 3);
vtmp012x4567 = vld3q_lane_u32(w4, vtmp012x4567, 0);
vtmp012x4567 = vld3q_lane_u32(w5, vtmp012x4567, 1);
vtmp012x4567 = vld3q_lane_u32(w6, vtmp012x4567, 2);
vst1q_u32(packed_weights, vtmp012x0123.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[0]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[1]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x0123.val[2]); packed_weights += 4;
vst1q_u32(packed_weights, vtmp012x4567.val[2]); packed_weights += 4;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
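// Editor's note: a standalone sketch (not from XNNPACK) of the transpose
// idiom the kernel above builds on: vld4q_lane_u32 loads 4 consecutive words
// from one row into lane i of all four vectors, so after 4 such loads
// val[k] holds column k, i.e. element k of every row.
#include <arm_neon.h>
#include <stdio.h>

void vld4q_lane_transpose_demo(void)
{
  const uint32_t r0[4] = {  0,  1,  2,  3 };
  const uint32_t r1[4] = { 10, 11, 12, 13 };
  const uint32_t r2[4] = { 20, 21, 22, 23 };
  const uint32_t r3[4] = { 30, 31, 32, 33 };
  uint32x4x4_t v;
  v.val[0] = vdupq_n_u32(0);
  v.val[1] = vdupq_n_u32(0);
  v.val[2] = vdupq_n_u32(0);
  v.val[3] = vdupq_n_u32(0);
  v = vld4q_lane_u32(r0, v, 0);
  v = vld4q_lane_u32(r1, v, 1);
  v = vld4q_lane_u32(r2, v, 2);
  v = vld4q_lane_u32(r3, v, 3);
  uint32_t col0[4];
  vst1q_u32(col0, v.val[0]);
  printf("%u %u %u %u\n", col0[0], col0[1], col0[2], col0[3]);  // 0 10 20 30
}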
| file_length: 17,893 | avg_line_length: 45.477922 | max_line_length: 80 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-scalar-float-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 8
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
out[4] = b[4];
out[5] = b[5];
out[6] = b[6];
out[7] = b[7];
b += 8;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
out[4] = 0;
out[5] = 0;
out[6] = 0;
out[7] = 0;
}
out += 8;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
const float v40 = w4[0];
const float v41 = w4[1];
const float v42 = w4[2];
const float v43 = w4[3];
w4 += 4;
const float v50 = w5[0];
const float v51 = w5[1];
const float v52 = w5[2];
const float v53 = w5[3];
w5 += 4;
const float v60 = w6[0];
const float v61 = w6[1];
const float v62 = w6[2];
const float v63 = w6[3];
w6 += 4;
const float v70 = w7[0];
const float v71 = w7[1];
const float v72 = w7[2];
const float v73 = w7[3];
w7 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[15] = v71;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[23] = v72;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out[31] = v73;
out += 32;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
const float v4 = *w4++;
out[4] = v4;
const float v5 = *w5++;
out[5] = v5;
const float v6 = *w6++;
out[6] = v6;
const float v7 = *w7++;
out[7] = v7;
out += 8;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (8 - n);
// NR remainder has less than 8 rows so last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
const float v40 = w4[0];
const float v41 = w4[1];
const float v42 = w4[2];
const float v43 = w4[3];
w4 += 4;
const float v50 = w5[0];
const float v51 = w5[1];
const float v52 = w5[2];
const float v53 = w5[3];
w5 += 4;
const float v60 = w6[0];
const float v61 = w6[1];
const float v62 = w6[2];
const float v63 = w6[3];
w6 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out += 32;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
const float v4 = *w4++;
out[4] = v4;
const float v5 = *w5++;
out[5] = v5;
const float v6 = *w6++;
out[6] = v6;
out += 8;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
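// Editor's note: a hypothetical self-check (not from XNNPACK) pinning down
// the packed layout for one full x8 block with the scalar kernel above:
// 8 bias values first, then kc groups of 8 weights gathered across rows.
#include <assert.h>
#include <stdint.h>

void layout_check(void)
{
  const uint32_t w[8 * 2] = {  // nc = 8 rows by kc = 2, row-major GOI
     1,  2,  11, 12,  21, 22,  31, 32,
    41, 42,  51, 52,  61, 62,  71, 72,
  };
  const uint32_t b[8] = { 900, 901, 902, 903, 904, 905, 906, 907 };
  uint32_t p[8 + 8 * 2];
  xnn_x32_packw_gemm_goi_ukernel_x8__scalar_float_x4(
      /*g=*/1, /*nc=*/8, /*kc=*/2, 8, 1, 1, w, b, p, /*extra_bytes=*/0, NULL);
  assert(p[0] == 900 && p[7] == 907);  // bias block leads
  assert(p[8] == 1 && p[9] == 11);     // k = 0 across rows 0..7
  assert(p[16] == 2 && p[23] == 72);   // k = 1 across rows 0..7
}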
| file_length: 7,627 | avg_line_length: 23.215873 | max_line_length: 72 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-scalar-int-x4.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__scalar_int_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint32_t* out = (uint32_t*) packed_weights;
const uint32_t* b = (const uint32_t*) bias;
do {
// NC main loop multiple of 8
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
out[4] = b[4];
out[5] = b[5];
out[6] = b[6];
out[7] = b[7];
b += 8;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
out[4] = 0;
out[5] = 0;
out[6] = 0;
out[7] = 0;
}
out += 8;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
const uint32_t v30 = w3[0];
const uint32_t v31 = w3[1];
const uint32_t v32 = w3[2];
const uint32_t v33 = w3[3];
w3 += 4;
const uint32_t v40 = w4[0];
const uint32_t v41 = w4[1];
const uint32_t v42 = w4[2];
const uint32_t v43 = w4[3];
w4 += 4;
const uint32_t v50 = w5[0];
const uint32_t v51 = w5[1];
const uint32_t v52 = w5[2];
const uint32_t v53 = w5[3];
w5 += 4;
const uint32_t v60 = w6[0];
const uint32_t v61 = w6[1];
const uint32_t v62 = w6[2];
const uint32_t v63 = w6[3];
w6 += 4;
const uint32_t v70 = w7[0];
const uint32_t v71 = w7[1];
const uint32_t v72 = w7[2];
const uint32_t v73 = w7[3];
w7 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[7] = v70;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[15] = v71;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[23] = v72;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out[31] = v73;
out += 32;
}
// KC remainder
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
const uint32_t v3 = *w3++;
out[3] = v3;
const uint32_t v4 = *w4++;
out[4] = v4;
const uint32_t v5 = *w5++;
out[5] = v5;
const uint32_t v6 = *w6++;
out[6] = v6;
const uint32_t v7 = *w7++;
out[7] = v7;
out += 8;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (8 - n);
// NR remainder has less than 8 rows so last row is not loaded
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
// KC main loop multiple of 8x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const uint32_t v00 = w0[0];
const uint32_t v01 = w0[1];
const uint32_t v02 = w0[2];
const uint32_t v03 = w0[3];
w0 += 4;
const uint32_t v10 = w1[0];
const uint32_t v11 = w1[1];
const uint32_t v12 = w1[2];
const uint32_t v13 = w1[3];
w1 += 4;
const uint32_t v20 = w2[0];
const uint32_t v21 = w2[1];
const uint32_t v22 = w2[2];
const uint32_t v23 = w2[3];
w2 += 4;
const uint32_t v30 = w3[0];
const uint32_t v31 = w3[1];
const uint32_t v32 = w3[2];
const uint32_t v33 = w3[3];
w3 += 4;
const uint32_t v40 = w4[0];
const uint32_t v41 = w4[1];
const uint32_t v42 = w4[2];
const uint32_t v43 = w4[3];
w4 += 4;
const uint32_t v50 = w5[0];
const uint32_t v51 = w5[1];
const uint32_t v52 = w5[2];
const uint32_t v53 = w5[3];
w5 += 4;
const uint32_t v60 = w6[0];
const uint32_t v61 = w6[1];
const uint32_t v62 = w6[2];
const uint32_t v63 = w6[3];
w6 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v40;
out[5] = v50;
out[6] = v60;
out[8] = v01;
out[9] = v11;
out[10] = v21;
out[11] = v31;
out[12] = v41;
out[13] = v51;
out[14] = v61;
out[16] = v02;
out[17] = v12;
out[18] = v22;
out[19] = v32;
out[20] = v42;
out[21] = v52;
out[22] = v62;
out[24] = v03;
out[25] = v13;
out[26] = v23;
out[27] = v33;
out[28] = v43;
out[29] = v53;
out[30] = v63;
out += 32;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const uint32_t v0 = *w0++;
out[0] = v0;
const uint32_t v1 = *w1++;
out[1] = v1;
const uint32_t v2 = *w2++;
out[2] = v2;
const uint32_t v3 = *w3++;
out[3] = v3;
const uint32_t v4 = *w4++;
out[4] = v4;
const uint32_t v5 = *w5++;
out[5] = v5;
const uint32_t v6 = *w6++;
out[6] = v6;
out += 8;
}
out = (uint32_t*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
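// Editor's note: a hypothetical sketch (not from XNNPACK) of why both scalar
// variants coexist: packing is pure data movement, so the int and float
// kernels should produce bit-identical output; the int variant simply keeps
// the copies in integer registers. Buffers must be pre-zeroed before the
// comparison, since padded lanes in a partial block are left unwritten.
#include <assert.h>
#include <stdint.h>
#include <string.h>

void variants_agree(size_t nc, size_t kc,
                    const uint32_t* w, const uint32_t* b,
                    uint32_t* p_int, uint32_t* p_flt, size_t packed_elems)
{
  memset(p_int, 0, packed_elems * sizeof(uint32_t));
  memset(p_flt, 0, packed_elems * sizeof(uint32_t));
  xnn_x32_packw_gemm_goi_ukernel_x8__scalar_int_x4(
      1, nc, kc, 8, 1, 1, w, b, p_int, 0, NULL);
  xnn_x32_packw_gemm_goi_ukernel_x8__scalar_float_x4(
      1, nc, kc, 8, 1, 1, w, b, p_flt, 0, NULL);
  assert(memcmp(p_int, p_flt, packed_elems * sizeof(uint32_t)) == 0);
}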
| file_length: 7,913 | avg_line_length: 24.12381 | max_line_length: 72 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-sse2-x4-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/x32-packw/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 8
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
const __m128 vb0123 = _mm_loadu_ps(b);
const __m128 vb4567 = _mm_loadu_ps(b + 4);
b += 8;
_mm_store_ps(packed_w, vb0123);
_mm_store_ps(packed_w + 4, vb4567);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
}
packed_w += 8;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
const __m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
const __m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
const __m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
const __m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
const __m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
const __m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
const __m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
const __m128 v7x0123 = _mm_loadu_ps(w7);
w7 += 4;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x0123, v1x0123);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x0123, v3x0123);
const __m128 v01x2_01x3 = _mm_unpackhi_ps(v0x0123, v1x0123);
const __m128 v23x2_23x3 = _mm_unpackhi_ps(v2x0123, v3x0123);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x0123, v5x0123);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x0123, v7x0123);
const __m128 v45x2_45x3 = _mm_unpackhi_ps(v4x0123, v5x0123);
const __m128 v67x2_67x3 = _mm_unpackhi_ps(v6x0123, v7x0123);
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2_01x3, v23x2_23x3);
const __m128 v0123x3 = _mm_movehl_ps(v23x2_23x3, v01x2_01x3);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2_45x3, v67x2_67x3);
const __m128 v4567x3 = _mm_movehl_ps(v67x2_67x3, v45x2_45x3);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
_mm_store_ps(packed_w + 24, v0123x3);
_mm_store_ps(packed_w + 28, v4567x3);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
const __m128 v0x0 = _mm_load_ss(w0);
w0 += 1;
const __m128 v1x0 = _mm_load_ss(w1);
w1 += 1;
const __m128 v2x0 = _mm_load_ss(w2);
w2 += 1;
const __m128 v3x0 = _mm_load_ss(w3);
w3 += 1;
const __m128 v4x0 = _mm_load_ss(w4);
w4 += 1;
const __m128 v5x0 = _mm_load_ss(w5);
w5 += 1;
const __m128 v6x0 = _mm_load_ss(w6);
w6 += 1;
const __m128 v7x0 = _mm_load_ss(w7);
w7 += 1;
const __m128 v01x0 = _mm_unpacklo_ps(v0x0, v1x0);
const __m128 v23x0 = _mm_unpacklo_ps(v2x0, v3x0);
const __m128 v45x0 = _mm_unpacklo_ps(v4x0, v5x0);
const __m128 v67x0 = _mm_unpacklo_ps(v6x0, v7x0);
const __m128 v0123x0 = _mm_movelh_ps(v01x0, v23x0);
const __m128 v4567x0 = _mm_movelh_ps(v45x0, v67x0);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
packed_w += 8;
break;
}
case 2:
{
const __m128 v0x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w0));
w0 += 2;
const __m128 v1x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w1));
w1 += 2;
const __m128 v2x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w2));
w2 += 2;
const __m128 v3x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w3));
w3 += 2;
const __m128 v4x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w4));
w4 += 2;
const __m128 v5x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w5));
w5 += 2;
const __m128 v6x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w6));
w6 += 2;
const __m128 v7x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w7));
w7 += 2;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x01, v1x01);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x01, v3x01);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x01, v5x01);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x01, v7x01);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
packed_w += 16;
break;
}
case 3:
{
__m128 v0x012 = _mm_load_ss(w0 + 2);
__m128 v1x012 = _mm_load_ss(w1 + 2);
__m128 v2x012 = _mm_load_ss(w2 + 2);
__m128 v3x012 = _mm_load_ss(w3 + 2);
__m128 v4x012 = _mm_load_ss(w4 + 2);
__m128 v5x012 = _mm_load_ss(w5 + 2);
__m128 v6x012 = _mm_load_ss(w6 + 2);
__m128 v7x012 = _mm_load_ss(w7 + 2);
v0x012 = _mm_movelh_ps(v0x012, v0x012);
v1x012 = _mm_movelh_ps(v1x012, v1x012);
v2x012 = _mm_movelh_ps(v2x012, v2x012);
v3x012 = _mm_movelh_ps(v3x012, v3x012);
v4x012 = _mm_movelh_ps(v4x012, v4x012);
v5x012 = _mm_movelh_ps(v5x012, v5x012);
v6x012 = _mm_movelh_ps(v6x012, v6x012);
v7x012 = _mm_movelh_ps(v7x012, v7x012);
v0x012 = _mm_loadl_pi(v0x012, (const __m64*) w0);
w0 += 3;
v1x012 = _mm_loadl_pi(v1x012, (const __m64*) w1);
w1 += 3;
v2x012 = _mm_loadl_pi(v2x012, (const __m64*) w2);
w2 += 3;
v3x012 = _mm_loadl_pi(v3x012, (const __m64*) w3);
w3 += 3;
v4x012 = _mm_loadl_pi(v4x012, (const __m64*) w4);
w4 += 3;
v5x012 = _mm_loadl_pi(v5x012, (const __m64*) w5);
w5 += 3;
v6x012 = _mm_loadl_pi(v6x012, (const __m64*) w6);
w6 += 3;
v7x012 = _mm_loadl_pi(v7x012, (const __m64*) w7);
w7 += 3;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x012, v1x012);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x012, v3x012);
const __m128 v01x2 = _mm_unpackhi_ps(v0x012, v1x012);
const __m128 v23x2 = _mm_unpackhi_ps(v2x012, v3x012);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x012, v5x012);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x012, v7x012);
const __m128 v45x2 = _mm_unpackhi_ps(v4x012, v5x012);
const __m128 v67x2 = _mm_unpackhi_ps(v6x012, v7x012);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2, v23x2);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2, v67x2);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
packed_w += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (8 - n);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
packed_w += 8;
}
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
const __m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
const __m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
const __m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
const __m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
const __m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
const __m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
const __m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x0123, v1x0123);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x0123, v3x0123);
const __m128 v01x2_01x3 = _mm_unpackhi_ps(v0x0123, v1x0123);
const __m128 v23x2_23x3 = _mm_unpackhi_ps(v2x0123, v3x0123);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x0123, v5x0123);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x0123, v6x0123);
const __m128 v45x2_45x3 = _mm_unpackhi_ps(v4x0123, v5x0123);
const __m128 v67x2_67x3 = _mm_unpackhi_ps(v6x0123, v6x0123);
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2_01x3, v23x2_23x3);
const __m128 v0123x3 = _mm_movehl_ps(v23x2_23x3, v01x2_01x3);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2_45x3, v67x2_67x3);
const __m128 v4567x3 = _mm_movehl_ps(v67x2_67x3, v45x2_45x3);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
_mm_store_ps(packed_w + 24, v0123x3);
_mm_store_ps(packed_w + 28, v4567x3);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
const __m128 v0x0 = _mm_load_ss(w0);
const __m128 v1x0 = _mm_load_ss(w1);
const __m128 v2x0 = _mm_load_ss(w2);
const __m128 v3x0 = _mm_load_ss(w3);
const __m128 v4x0 = _mm_load_ss(w4);
const __m128 v5x0 = _mm_load_ss(w5);
const __m128 v6x0 = _mm_load_ss(w6);
const __m128 v01x0 = _mm_unpacklo_ps(v0x0, v1x0);
const __m128 v23x0 = _mm_unpacklo_ps(v2x0, v3x0);
const __m128 v45x0 = _mm_unpacklo_ps(v4x0, v5x0);
const __m128 v67x0 = _mm_unpacklo_ps(v6x0, v6x0);
const __m128 v0123x0 = _mm_movelh_ps(v01x0, v23x0);
const __m128 v4567x0 = _mm_movelh_ps(v45x0, v67x0);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
packed_w += 8;
break;
}
case 2:
{
const __m128 v0x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w0));
const __m128 v1x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w1));
const __m128 v2x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w2));
const __m128 v3x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w3));
const __m128 v4x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w4));
const __m128 v5x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w5));
const __m128 v6x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w6));
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x01, v1x01);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x01, v3x01);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x01, v5x01);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x01, v6x01);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
packed_w += 16;
break;
}
case 3:
{
__m128 v0x012 = _mm_load_ss(w0 + 2);
__m128 v1x012 = _mm_load_ss(w1 + 2);
__m128 v2x012 = _mm_load_ss(w2 + 2);
__m128 v3x012 = _mm_load_ss(w3 + 2);
__m128 v4x012 = _mm_load_ss(w4 + 2);
__m128 v5x012 = _mm_load_ss(w5 + 2);
__m128 v6x012 = _mm_load_ss(w6 + 2);
v0x012 = _mm_movelh_ps(v0x012, v0x012);
v1x012 = _mm_movelh_ps(v1x012, v1x012);
v2x012 = _mm_movelh_ps(v2x012, v2x012);
v3x012 = _mm_movelh_ps(v3x012, v3x012);
v4x012 = _mm_movelh_ps(v4x012, v4x012);
v5x012 = _mm_movelh_ps(v5x012, v5x012);
v6x012 = _mm_movelh_ps(v6x012, v6x012);
v0x012 = _mm_loadl_pi(v0x012, (const __m64*) w0);
v1x012 = _mm_loadl_pi(v1x012, (const __m64*) w1);
v2x012 = _mm_loadl_pi(v2x012, (const __m64*) w2);
v3x012 = _mm_loadl_pi(v3x012, (const __m64*) w3);
v4x012 = _mm_loadl_pi(v4x012, (const __m64*) w4);
v5x012 = _mm_loadl_pi(v5x012, (const __m64*) w5);
v6x012 = _mm_loadl_pi(v6x012, (const __m64*) w6);
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x012, v1x012);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x012, v3x012);
const __m128 v01x2 = _mm_unpackhi_ps(v0x012, v1x012);
const __m128 v23x2 = _mm_unpackhi_ps(v2x012, v3x012);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x012, v5x012);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x012, v6x012);
const __m128 v45x2 = _mm_unpackhi_ps(v4x012, v5x012);
const __m128 v67x2 = _mm_unpackhi_ps(v6x012, v6x012);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2, v23x2);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2, v67x2);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
packed_w += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 19,628
| 38.336673
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 8
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
const __m128 vb0123 = _mm_loadu_ps(b);
const __m128 vb4567 = _mm_loadu_ps(b + 4);
b += 8;
_mm_store_ps(packed_w, vb0123);
_mm_store_ps(packed_w + 4, vb4567);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
}
packed_w += 8;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
const __m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
const __m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
const __m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
const __m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
const __m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
const __m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
const __m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
const __m128 v7x0123 = _mm_loadu_ps(w7);
w7 += 4;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x0123, v1x0123);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x0123, v3x0123);
const __m128 v01x2_01x3 = _mm_unpackhi_ps(v0x0123, v1x0123);
const __m128 v23x2_23x3 = _mm_unpackhi_ps(v2x0123, v3x0123);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x0123, v5x0123);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x0123, v7x0123);
const __m128 v45x2_45x3 = _mm_unpackhi_ps(v4x0123, v5x0123);
const __m128 v67x2_67x3 = _mm_unpackhi_ps(v6x0123, v7x0123);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2_01x3, v23x2_23x3);
const __m128 v0123x3 = _mm_movehl_ps(v23x2_23x3, v01x2_01x3);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2_45x3, v67x2_67x3);
const __m128 v4567x3 = _mm_movehl_ps(v67x2_67x3, v45x2_45x3);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
_mm_store_ps(packed_w + 24, v0123x3);
_mm_store_ps(packed_w + 28, v4567x3);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
const __m128 v0x0 = _mm_load_ss(w0);
w0 += 1;
const __m128 v1x0 = _mm_load_ss(w1);
w1 += 1;
const __m128 v2x0 = _mm_load_ss(w2);
w2 += 1;
const __m128 v3x0 = _mm_load_ss(w3);
w3 += 1;
const __m128 v4x0 = _mm_load_ss(w4);
w4 += 1;
const __m128 v5x0 = _mm_load_ss(w5);
w5 += 1;
const __m128 v6x0 = _mm_load_ss(w6);
w6 += 1;
const __m128 v7x0 = _mm_load_ss(w7);
w7 += 1;
const __m128 v01x0 = _mm_unpacklo_ps(v0x0, v1x0);
const __m128 v23x0 = _mm_unpacklo_ps(v2x0, v3x0);
const __m128 v45x0 = _mm_unpacklo_ps(v4x0, v5x0);
const __m128 v67x0 = _mm_unpacklo_ps(v6x0, v7x0);
const __m128 v0123x0 = _mm_movelh_ps(v01x0, v23x0);
const __m128 v4567x0 = _mm_movelh_ps(v45x0, v67x0);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
packed_w += 8;
break;
}
case 2:
{
const __m128 v0x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w0));
w0 += 2;
const __m128 v1x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w1));
w1 += 2;
const __m128 v2x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w2));
w2 += 2;
const __m128 v3x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w3));
w3 += 2;
const __m128 v4x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w4));
w4 += 2;
const __m128 v5x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w5));
w5 += 2;
const __m128 v6x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w6));
w6 += 2;
const __m128 v7x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w7));
w7 += 2;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x01, v1x01);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x01, v3x01);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x01, v5x01);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x01, v7x01);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
packed_w += 16;
break;
}
case 3:
{
__m128 v0x012 = _mm_load_ss(w0 + 2);
__m128 v1x012 = _mm_load_ss(w1 + 2);
__m128 v2x012 = _mm_load_ss(w2 + 2);
__m128 v3x012 = _mm_load_ss(w3 + 2);
__m128 v4x012 = _mm_load_ss(w4 + 2);
__m128 v5x012 = _mm_load_ss(w5 + 2);
__m128 v6x012 = _mm_load_ss(w6 + 2);
__m128 v7x012 = _mm_load_ss(w7 + 2);
v0x012 = _mm_movelh_ps(v0x012, v0x012);
v1x012 = _mm_movelh_ps(v1x012, v1x012);
v2x012 = _mm_movelh_ps(v2x012, v2x012);
v3x012 = _mm_movelh_ps(v3x012, v3x012);
v4x012 = _mm_movelh_ps(v4x012, v4x012);
v5x012 = _mm_movelh_ps(v5x012, v5x012);
v6x012 = _mm_movelh_ps(v6x012, v6x012);
v7x012 = _mm_movelh_ps(v7x012, v7x012);
v0x012 = _mm_loadl_pi(v0x012, (const __m64*) w0);
w0 += 3;
v1x012 = _mm_loadl_pi(v1x012, (const __m64*) w1);
w1 += 3;
v2x012 = _mm_loadl_pi(v2x012, (const __m64*) w2);
w2 += 3;
v3x012 = _mm_loadl_pi(v3x012, (const __m64*) w3);
w3 += 3;
v4x012 = _mm_loadl_pi(v4x012, (const __m64*) w4);
w4 += 3;
v5x012 = _mm_loadl_pi(v5x012, (const __m64*) w5);
w5 += 3;
v6x012 = _mm_loadl_pi(v6x012, (const __m64*) w6);
w6 += 3;
v7x012 = _mm_loadl_pi(v7x012, (const __m64*) w7);
w7 += 3;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x012, v1x012);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x012, v3x012);
const __m128 v01x2 = _mm_unpackhi_ps(v0x012, v1x012);
const __m128 v23x2 = _mm_unpackhi_ps(v2x012, v3x012);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x012, v5x012);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x012, v7x012);
const __m128 v45x2 = _mm_unpackhi_ps(v4x012, v5x012);
const __m128 v67x2 = _mm_unpackhi_ps(v6x012, v7x012);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2, v23x2);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2, v67x2);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
packed_w += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (8 - n);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
packed_w += 8;
}
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
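// Rows w1..w6 alias the previous row whenever the corresponding output
// column is absent (n < 8). This keeps every load below in bounds; the
// duplicated lanes written for absent columns are padding that the GEMM
// consumer never reads.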
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
const __m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
const __m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
const __m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
const __m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
const __m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
const __m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
const __m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x0123, v1x0123);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x0123, v3x0123);
const __m128 v01x2_01x3 = _mm_unpackhi_ps(v0x0123, v1x0123);
const __m128 v23x2_23x3 = _mm_unpackhi_ps(v2x0123, v3x0123);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x0123, v5x0123);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x0123, v6x0123);
const __m128 v45x2_45x3 = _mm_unpackhi_ps(v4x0123, v5x0123);
const __m128 v67x2_67x3 = _mm_unpackhi_ps(v6x0123, v6x0123);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2_01x3, v23x2_23x3);
const __m128 v0123x3 = _mm_movehl_ps(v23x2_23x3, v01x2_01x3);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2_45x3, v67x2_67x3);
const __m128 v4567x3 = _mm_movehl_ps(v67x2_67x3, v45x2_45x3);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
_mm_store_ps(packed_w + 24, v0123x3);
_mm_store_ps(packed_w + 28, v4567x3);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
const __m128 v0x0 = _mm_load_ss(w0);
const __m128 v1x0 = _mm_load_ss(w1);
const __m128 v2x0 = _mm_load_ss(w2);
const __m128 v3x0 = _mm_load_ss(w3);
const __m128 v4x0 = _mm_load_ss(w4);
const __m128 v5x0 = _mm_load_ss(w5);
const __m128 v6x0 = _mm_load_ss(w6);
const __m128 v01x0 = _mm_unpacklo_ps(v0x0, v1x0);
const __m128 v23x0 = _mm_unpacklo_ps(v2x0, v3x0);
const __m128 v45x0 = _mm_unpacklo_ps(v4x0, v5x0);
const __m128 v67x0 = _mm_unpacklo_ps(v6x0, v6x0);
const __m128 v0123x0 = _mm_movelh_ps(v01x0, v23x0);
const __m128 v4567x0 = _mm_movelh_ps(v45x0, v67x0);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
packed_w += 8;
break;
}
case 2:
{
const __m128 v0x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w0));
const __m128 v1x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w1));
const __m128 v2x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w2));
const __m128 v3x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w3));
const __m128 v4x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w4));
const __m128 v5x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w5));
const __m128 v6x01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w6));
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x01, v1x01);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x01, v3x01);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x01, v5x01);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x01, v6x01);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
packed_w += 16;
break;
}
case 3:
{
__m128 v0x012 = _mm_load_ss(w0 + 2);
__m128 v1x012 = _mm_load_ss(w1 + 2);
__m128 v2x012 = _mm_load_ss(w2 + 2);
__m128 v3x012 = _mm_load_ss(w3 + 2);
__m128 v4x012 = _mm_load_ss(w4 + 2);
__m128 v5x012 = _mm_load_ss(w5 + 2);
__m128 v6x012 = _mm_load_ss(w6 + 2);
v0x012 = _mm_movelh_ps(v0x012, v0x012);
v1x012 = _mm_movelh_ps(v1x012, v1x012);
v2x012 = _mm_movelh_ps(v2x012, v2x012);
v3x012 = _mm_movelh_ps(v3x012, v3x012);
v4x012 = _mm_movelh_ps(v4x012, v4x012);
v5x012 = _mm_movelh_ps(v5x012, v5x012);
v6x012 = _mm_movelh_ps(v6x012, v6x012);
v0x012 = _mm_loadl_pi(v0x012, (const __m64*) w0);
v1x012 = _mm_loadl_pi(v1x012, (const __m64*) w1);
v2x012 = _mm_loadl_pi(v2x012, (const __m64*) w2);
v3x012 = _mm_loadl_pi(v3x012, (const __m64*) w3);
v4x012 = _mm_loadl_pi(v4x012, (const __m64*) w4);
v5x012 = _mm_loadl_pi(v5x012, (const __m64*) w5);
v6x012 = _mm_loadl_pi(v6x012, (const __m64*) w6);
const __m128 v01x0_01x1 = _mm_unpacklo_ps(v0x012, v1x012);
const __m128 v23x0_23x1 = _mm_unpacklo_ps(v2x012, v3x012);
const __m128 v01x2 = _mm_unpackhi_ps(v0x012, v1x012);
const __m128 v23x2 = _mm_unpackhi_ps(v2x012, v3x012);
const __m128 v45x0_45x1 = _mm_unpacklo_ps(v4x012, v5x012);
const __m128 v67x0_67x1 = _mm_unpacklo_ps(v6x012, v6x012);
const __m128 v45x2 = _mm_unpackhi_ps(v4x012, v5x012);
const __m128 v67x2 = _mm_unpackhi_ps(v6x012, v6x012);
const __m128 v0123x0 = _mm_movelh_ps(v01x0_01x1, v23x0_23x1);
const __m128 v0123x1 = _mm_movehl_ps(v23x0_23x1, v01x0_01x1);
const __m128 v0123x2 = _mm_movelh_ps(v01x2, v23x2);
const __m128 v4567x0 = _mm_movelh_ps(v45x0_45x1, v67x0_67x1);
const __m128 v4567x1 = _mm_movehl_ps(v67x0_67x1, v45x0_45x1);
const __m128 v4567x2 = _mm_movelh_ps(v45x2, v67x2);
_mm_store_ps(packed_w, v0123x0);
_mm_store_ps(packed_w + 4, v4567x0);
_mm_store_ps(packed_w + 8, v0123x1);
_mm_store_ps(packed_w + 12, v4567x1);
_mm_store_ps(packed_w + 16, v0123x2);
_mm_store_ps(packed_w + 20, v4567x2);
packed_w += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
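The KC main loops above all rely on the same SSE idiom: interleave pairs of rows with _mm_unpacklo_ps/_mm_unpackhi_ps, then stitch 64-bit halves together with _mm_movelh_ps/_mm_movehl_ps to finish a 4x4 transpose. The stand-alone sketch below is not part of XNNPACK (all names are local to the sketch); it demonstrates the same sequence on a single 4x4 matrix so the lane movement can be checked in isolation.
#include <stdio.h>
#include <xmmintrin.h>
int main(void) {
  float m[4][4] = {
    { 0.f,  1.f,  2.f,  3.f},
    { 4.f,  5.f,  6.f,  7.f},
    { 8.f,  9.f, 10.f, 11.f},
    {12.f, 13.f, 14.f, 15.f},
  };
  __m128 r0 = _mm_loadu_ps(m[0]);
  __m128 r1 = _mm_loadu_ps(m[1]);
  __m128 r2 = _mm_loadu_ps(m[2]);
  __m128 r3 = _mm_loadu_ps(m[3]);
  // Interleave row pairs: t0 = {m00, m10, m01, m11}, t1 = {m20, m30, m21, m31}, ...
  const __m128 t0 = _mm_unpacklo_ps(r0, r1);
  const __m128 t1 = _mm_unpacklo_ps(r2, r3);
  const __m128 t2 = _mm_unpackhi_ps(r0, r1);
  const __m128 t3 = _mm_unpackhi_ps(r2, r3);
  // Combine 64-bit halves to finish: r0 = {m00, m10, m20, m30}, etc.
  r0 = _mm_movelh_ps(t0, t1);
  r1 = _mm_movehl_ps(t1, t0);
  r2 = _mm_movelh_ps(t2, t3);
  r3 = _mm_movehl_ps(t3, t2);
  _mm_storeu_ps(m[0], r0);
  _mm_storeu_ps(m[1], r1);
  _mm_storeu_ps(m[2], r2);
  _mm_storeu_ps(m[3], r3);
  for (int i = 0; i < 4; i++) {
    printf("%5.1f %5.1f %5.1f %5.1f\n", m[i][0], m[i][1], m[i][2], m[i][3]);
  }
  return 0;
}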
| 18,007
| 37.561028
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packw/gen/x32-packw-x8-gemm-goi-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8__wasmsimd_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
do {
// NC main loop multiple of 8
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
const v128_t vb0123 = wasm_v128_load(bias);
const v128_t vb4567 = wasm_v128_load(bias + 4);
bias += 8;
wasm_v128_store(packed_weights, vb0123);
wasm_v128_store(packed_weights + 4, vb4567);
} else {
const v128_t vzero = wasm_i32x4_const_splat(0);
wasm_v128_store(packed_weights, vzero);
wasm_v128_store(packed_weights + 4, vzero);
}
packed_weights += 8;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
// KC main loop, multiple of 4 (4 k-values across all 8 rows per iteration)

size_t k = kc;
for (; k >= 4; k -= 4) {
const v128_t v0x0123 = wasm_v128_load(w0);
w0 += 4;
const v128_t v1x0123 = wasm_v128_load(w1);
w1 += 4;
const v128_t v2x0123 = wasm_v128_load(w2);
w2 += 4;
const v128_t v3x0123 = wasm_v128_load(w3);
w3 += 4;
const v128_t v4x0123 = wasm_v128_load(w4);
w4 += 4;
const v128_t v5x0123 = wasm_v128_load(w5);
w5 += 4;
const v128_t v6x0123 = wasm_v128_load(w6);
w6 += 4;
const v128_t v7x0123 = wasm_v128_load(w7);
w7 += 4;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v7x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v7x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
if XNN_UNLIKELY(k != 0) {
// KC remainder (1..3)
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
v128_t v0123x0 = wasm_v128_load32_zero(w0);
w0 += 1;
v128_t v4567x0 = wasm_v128_load32_zero(w4);
w4 += 1;
v0123x0 = wasm_v128_load32_lane(w1, v0123x0, 1);
w1 += 1;
v4567x0 = wasm_v128_load32_lane(w5, v4567x0, 1);
w5 += 1;
v0123x0 = wasm_v128_load32_lane(w2, v0123x0, 2);
w2 += 1;
v4567x0 = wasm_v128_load32_lane(w6, v4567x0, 2);
w6 += 1;
v0123x0 = wasm_v128_load32_lane(w3, v0123x0, 3);
w3 += 1;
v4567x0 = wasm_v128_load32_lane(w7, v4567x0, 3);
w7 += 1;
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
packed_weights += 8;
break;
}
case 2:
{
const v128_t v0x01 = wasm_v128_load64_zero(w0);
w0 += 2;
const v128_t v1x01 = wasm_v128_load64_zero(w1);
w1 += 2;
const v128_t v2x01 = wasm_v128_load64_zero(w2);
w2 += 2;
const v128_t v3x01 = wasm_v128_load64_zero(w3);
w3 += 2;
const v128_t v4x01 = wasm_v128_load64_zero(w4);
w4 += 2;
const v128_t v5x01 = wasm_v128_load64_zero(w5);
w5 += 2;
const v128_t v6x01 = wasm_v128_load64_zero(w6);
w6 += 2;
const v128_t v7x01 = wasm_v128_load64_zero(w7);
w7 += 2;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x01, v1x01, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x01, v3x01, 0, 4, 1, 5);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x01, v5x01, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x01, v7x01, 0, 4, 1, 5);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
packed_weights += 16;
break;
}
case 3:
{
v128_t v0x012 = wasm_v128_load64_zero(w0);
w0 += 2;
v128_t v1x012 = wasm_v128_load64_zero(w1);
w1 += 2;
v128_t v2x012 = wasm_v128_load64_zero(w2);
w2 += 2;
v128_t v3x012 = wasm_v128_load64_zero(w3);
w3 += 2;
v128_t v4x012 = wasm_v128_load64_zero(w4);
w4 += 2;
v128_t v5x012 = wasm_v128_load64_zero(w5);
w5 += 2;
v128_t v6x012 = wasm_v128_load64_zero(w6);
w6 += 2;
v128_t v7x012 = wasm_v128_load64_zero(w7);
w7 += 2;
v0x012 = wasm_v128_load32_lane(w0, v0x012, 2);
w0 += 1;
v1x012 = wasm_v128_load32_lane(w1, v1x012, 2);
w1 += 1;
v2x012 = wasm_v128_load32_lane(w2, v2x012, 2);
w2 += 1;
v3x012 = wasm_v128_load32_lane(w3, v3x012, 2);
w3 += 1;
v4x012 = wasm_v128_load32_lane(w4, v4x012, 2);
w4 += 1;
v5x012 = wasm_v128_load32_lane(w5, v5x012, 2);
w5 += 1;
v6x012 = wasm_v128_load32_lane(w6, v6x012, 2);
w6 += 1;
v7x012 = wasm_v128_load32_lane(w7, v7x012, 2);
w7 += 1;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x012, v1x012, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x012, v3x012, 0, 4, 1, 5);
const v128_t v01x2 = wasm_v32x4_shuffle(v0x012, v1x012, 2, 6, 3, 7);
const v128_t v23x2 = wasm_v32x4_shuffle(v2x012, v3x012, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x012, v5x012, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x012, v7x012, 0, 4, 1, 5);
const v128_t v45x2 = wasm_v32x4_shuffle(v4x012, v5x012, 2, 6, 3, 7);
const v128_t v67x2 = wasm_v32x4_shuffle(v6x012, v7x012, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2, v23x2, 0, 2);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2, v67x2, 0, 2);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
packed_weights += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const v128_t vzero = wasm_i32x4_const_splat(0);
wasm_v128_store(packed_weights, vzero);
wasm_v128_store(packed_weights + 4, vzero);
packed_weights += 8;
}
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
size_t k = kc;
for (; k >= 4; k -= 4) {
const v128_t v0x0123 = wasm_v128_load(w0);
w0 += 4;
const v128_t v1x0123 = wasm_v128_load(w1);
w1 += 4;
const v128_t v2x0123 = wasm_v128_load(w2);
w2 += 4;
const v128_t v3x0123 = wasm_v128_load(w3);
w3 += 4;
const v128_t v4x0123 = wasm_v128_load(w4);
w4 += 4;
const v128_t v5x0123 = wasm_v128_load(w5);
w5 += 4;
const v128_t v6x0123 = wasm_v128_load(w6);
w6 += 4;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v6x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
case 1:
{
v128_t v0123x0 = wasm_v128_load32_zero(w0);
w0 += 1;
v128_t v4567x0 = wasm_v128_load32_zero(w4);
w4 += 1;
v0123x0 = wasm_v128_load32_lane(w1, v0123x0, 1);
w1 += 1;
v4567x0 = wasm_v128_load32_lane(w5, v4567x0, 1);
w5 += 1;
v0123x0 = wasm_v128_load32_lane(w2, v0123x0, 2);
w2 += 1;
v4567x0 = wasm_v128_load32_lane(w6, v4567x0, 2);
w6 += 1;
v0123x0 = wasm_v128_load32_lane(w3, v0123x0, 3);
w3 += 1;
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
packed_weights += 8;
break;
}
case 2:
{
const v128_t v0x01 = wasm_v128_load64_zero(w0);
w0 += 2;
const v128_t v1x01 = wasm_v128_load64_zero(w1);
w1 += 2;
const v128_t v2x01 = wasm_v128_load64_zero(w2);
w2 += 2;
const v128_t v3x01 = wasm_v128_load64_zero(w3);
w3 += 2;
const v128_t v4x01 = wasm_v128_load64_zero(w4);
w4 += 2;
const v128_t v5x01 = wasm_v128_load64_zero(w5);
w5 += 2;
const v128_t v6x01 = wasm_v128_load64_zero(w6);
w6 += 2;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x01, v1x01, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x01, v3x01, 0, 4, 1, 5);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x01, v5x01, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x01, v6x01, 0, 4, 1, 5);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
packed_weights += 16;
break;
}
case 3:
{
v128_t v0x012 = wasm_v128_load64_zero(w0);
w0 += 2;
v128_t v1x012 = wasm_v128_load64_zero(w1);
w1 += 2;
v128_t v2x012 = wasm_v128_load64_zero(w2);
w2 += 2;
v128_t v3x012 = wasm_v128_load64_zero(w3);
w3 += 2;
v128_t v4x012 = wasm_v128_load64_zero(w4);
w4 += 2;
v128_t v5x012 = wasm_v128_load64_zero(w5);
w5 += 2;
v128_t v6x012 = wasm_v128_load64_zero(w6);
w6 += 2;
v0x012 = wasm_v128_load32_lane(w0, v0x012, 2);
w0 += 1;
v1x012 = wasm_v128_load32_lane(w1, v1x012, 2);
w1 += 1;
v2x012 = wasm_v128_load32_lane(w2, v2x012, 2);
w2 += 1;
v3x012 = wasm_v128_load32_lane(w3, v3x012, 2);
w3 += 1;
v4x012 = wasm_v128_load32_lane(w4, v4x012, 2);
w4 += 1;
v5x012 = wasm_v128_load32_lane(w5, v5x012, 2);
w5 += 1;
v6x012 = wasm_v128_load32_lane(w6, v6x012, 2);
w6 += 1;
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x012, v1x012, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x012, v3x012, 0, 4, 1, 5);
const v128_t v01x2 = wasm_v32x4_shuffle(v0x012, v1x012, 2, 6, 3, 7);
const v128_t v23x2 = wasm_v32x4_shuffle(v2x012, v3x012, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x012, v5x012, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x012, v6x012, 0, 4, 1, 5);
const v128_t v45x2 = wasm_v32x4_shuffle(v4x012, v5x012, 2, 6, 3, 7);
const v128_t v67x2 = wasm_v32x4_shuffle(v6x012, v6x012, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2, v23x2, 0, 2);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2, v67x2, 0, 2);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
packed_weights += 24;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
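For context, a caller drives these packing ukernels with g groups of an nc x kc row-major weight matrix and an optional bias vector; with extra_bytes = 0 the output occupies round_up(nc, 8) * (kc + 1) uint32_t values per group. The wrapper below is a minimal sketch, assuming the XNNPACK headers are on the include path; pack_f32_weights and its parameter layout are illustrative, not part of the library API.
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/packw.h>
// Pack one group of GEMM weights for the 8-column wasmsimd micro-kernel.
// weights: nc x kc, row-major; bias: nc entries or NULL;
// packed: round_up(nc, 8) * (kc + 1) elements.
static void pack_f32_weights(size_t nc, size_t kc,
                             const uint32_t* weights,
                             const uint32_t* bias,
                             uint32_t* packed)
{
  xnn_x32_packw_gemm_goi_ukernel_x8__wasmsimd_x4(
      /*g=*/1, nc, kc, /*nr=*/8, /*kr=*/1, /*sr=*/1,
      weights, bias, packed, /*extra_bytes=*/0, /*params=*/NULL);
}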
| 18,567
| 38.422505
| 85
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packw/gen/x32-packw-x8s4-gemm-goi-sse2-x4-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/s4-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x4_prfm(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 4);
assert(weights != NULL);
assert(packed_weights != NULL);
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 8
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
const __m128 vb0 = _mm_loadu_ps(b);
const __m128 vb4 = _mm_loadu_ps(b + 4);
_mm_store_ps(packed_w, vb0);
_mm_store_ps(packed_w + 4, vb4);
b += 8;
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
}
packed_w += 8;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
xnn_prefetch_to_l1((const int8_t*) w0);
xnn_prefetch_to_l1((const int8_t*) w0 + 64);
xnn_prefetch_to_l1((const int8_t*) w1);
xnn_prefetch_to_l1((const int8_t*) w1 + 64);
xnn_prefetch_to_l1((const int8_t*) w2);
xnn_prefetch_to_l1((const int8_t*) w2 + 64);
xnn_prefetch_to_l1((const int8_t*) w3);
xnn_prefetch_to_l1((const int8_t*) w3 + 64);
xnn_prefetch_to_l1((const int8_t*) w4);
xnn_prefetch_to_l1((const int8_t*) w4 + 64);
xnn_prefetch_to_l1((const int8_t*) w5);
xnn_prefetch_to_l1((const int8_t*) w5 + 64);
xnn_prefetch_to_l1((const int8_t*) w6);
xnn_prefetch_to_l1((const int8_t*) w6 + 64);
xnn_prefetch_to_l1((const int8_t*) w7);
xnn_prefetch_to_l1((const int8_t*) w7 + 64);
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
// Read blocks of 4x4
// a b c d
// e f g h
// i j k l
// m n o p
__m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
__m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
__m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
__m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
__m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
__m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
__m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
__m128 v7x0123 = _mm_loadu_ps(w7);
w7 += 4;
// Apply SR4 shuffle
v1x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v2x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v3x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3x0123), _MM_SHUFFLE(2, 1, 0, 3)));
v5x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v6x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v7x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v7x0123), _MM_SHUFFLE(2, 1, 0, 3)));
// Transpose 2x2
const __m128 vtmp0x0123 = _mm_unpacklo_ps(v0x0123, v1x0123); // a e b f from row 0, 1
const __m128 vtmp1x0123 = _mm_unpacklo_ps(v2x0123, v3x0123); // i m j n from row 2, 3
const __m128 vtmp2x0123 = _mm_unpackhi_ps(v0x0123, v1x0123); // c g d h from row 0, 1
const __m128 vtmp3x0123 = _mm_unpackhi_ps(v2x0123, v3x0123); // k o l p from row 2, 3
const __m128 vtmp4x0123 = _mm_unpacklo_ps(v4x0123, v5x0123); // a e b f from row 0, 1
const __m128 vtmp5x0123 = _mm_unpacklo_ps(v6x0123, v7x0123); // i m j n from row 2, 3
const __m128 vtmp6x0123 = _mm_unpackhi_ps(v4x0123, v5x0123); // c g d h from row 0, 1
const __m128 vtmp7x0123 = _mm_unpackhi_ps(v6x0123, v7x0123); // k o l p from row 2, 3
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
xnn_prefetch_to_l1((const int8_t*) w7 + 128);
// Transpose 4x4
v0x0123 = _mm_movelh_ps(vtmp0x0123, vtmp1x0123); // a e i m from row 0, 1
v1x0123 = _mm_movehl_ps(vtmp1x0123, vtmp0x0123); // b f j n from row 0, 1
v2x0123 = _mm_movelh_ps(vtmp2x0123, vtmp3x0123); // c g k o from row 2, 3
v3x0123 = _mm_movehl_ps(vtmp3x0123, vtmp2x0123); // d h l p from row 2, 3
v4x0123 = _mm_movelh_ps(vtmp4x0123, vtmp5x0123); // a e i m from row 0, 1
v5x0123 = _mm_movehl_ps(vtmp5x0123, vtmp4x0123); // b f j n from row 0, 1
v6x0123 = _mm_movelh_ps(vtmp6x0123, vtmp7x0123); // c g k o from row 2, 3
v7x0123 = _mm_movehl_ps(vtmp7x0123, vtmp6x0123); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0x0123);
_mm_store_ps(packed_w + 4, v4x0123);
_mm_store_ps(packed_w + 8, v1x0123);
_mm_store_ps(packed_w + 12, v5x0123);
_mm_store_ps(packed_w + 16, v2x0123);
_mm_store_ps(packed_w + 20, v6x0123);
_mm_store_ps(packed_w + 24, v3x0123);
_mm_store_ps(packed_w + 28, v7x0123);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
__m128 v0 = _mm_undefined_ps();
__m128 v1 = _mm_undefined_ps();
__m128 v2 = _mm_undefined_ps();
__m128 v3 = _mm_undefined_ps();
__m128 v4 = _mm_undefined_ps();
__m128 v5 = _mm_undefined_ps();
__m128 v6 = _mm_undefined_ps();
__m128 v7 = _mm_undefined_ps();
switch (k) {
case 1:
// Read blocks of 4x1
// a
// e
// i
// m
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
v2 = _mm_load_ss(w2);
w2 += 1;
v3 = _mm_load_ss(w3);
w3 += 1;
v4 = _mm_load_ss(w4);
w4 += 1;
v5 = _mm_load_ss(w5);
w5 += 1;
v6 = _mm_load_ss(w6);
w6 += 1;
v7 = _mm_load_ss(w7);
w7 += 1;
break;
case 2:
// Read blocks of 4x2
// a b
// e f
// i j
// m n
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
v2 = _mm_castpd_ps(_mm_load_sd((const double*) w2));
w2 += 2;
v3 = _mm_castpd_ps(_mm_load_sd((const double*) w3));
w3 += 2;
v4 = _mm_castpd_ps(_mm_load_sd((const double*) w4));
w4 += 2;
v5 = _mm_castpd_ps(_mm_load_sd((const double*) w5));
w5 += 2;
v6 = _mm_castpd_ps(_mm_load_sd((const double*) w6));
w6 += 2;
v7 = _mm_castpd_ps(_mm_load_sd((const double*) w7));
w7 += 2;
break;
case 3:
{
// Read blocks of 4x3
// a b c
// e f g
// i j k
// m n o
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
const __m128 v2lo = _mm_castpd_ps(_mm_load_sd((const double*) w2));
const __m128 v2hi = _mm_load_ss(w2 + 2);
v2 = _mm_movelh_ps(v2lo, v2hi);
w2 += 3;
const __m128 v3lo = _mm_castpd_ps(_mm_load_sd((const double*) w3));
const __m128 v3hi = _mm_load_ss(w3 + 2);
v3 = _mm_movelh_ps(v3lo, v3hi);
w3 += 3;
const __m128 v4lo = _mm_castpd_ps(_mm_load_sd((const double*) w4));
const __m128 v4hi = _mm_load_ss(w4 + 2);
v4 = _mm_movelh_ps(v4lo, v4hi);
w4 += 3;
const __m128 v5lo = _mm_castpd_ps(_mm_load_sd((const double*) w5));
const __m128 v5hi = _mm_load_ss(w5 + 2);
v5 = _mm_movelh_ps(v5lo, v5hi);
w5 += 3;
const __m128 v6lo = _mm_castpd_ps(_mm_load_sd((const double*) w6));
const __m128 v6hi = _mm_load_ss(w6 + 2);
v6 = _mm_movelh_ps(v6lo, v6hi);
w6 += 3;
const __m128 v7lo = _mm_castpd_ps(_mm_load_sd((const double*) w7));
const __m128 v7hi = _mm_load_ss(w7 + 2);
v7 = _mm_movelh_ps(v7lo, v7hi);
w7 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
// Apply SR4 shuffle
v1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1), _MM_SHUFFLE(0, 3, 2, 1)));
v2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2), _MM_SHUFFLE(1, 0, 3, 2)));
v3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3), _MM_SHUFFLE(2, 1, 0, 3)));
v5 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5), _MM_SHUFFLE(0, 3, 2, 1)));
v6 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6), _MM_SHUFFLE(1, 0, 3, 2)));
v7 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v7), _MM_SHUFFLE(2, 1, 0, 3)));
// Transpose 2x2
const __m128 vtmp0 = _mm_unpacklo_ps(v0, v1); // a e b f from row 0, 1
const __m128 vtmp1 = _mm_unpacklo_ps(v2, v3); // i m j n from row 2, 3
const __m128 vtmp2 = _mm_unpackhi_ps(v0, v1); // c g d h from row 0, 1
const __m128 vtmp3 = _mm_unpackhi_ps(v2, v3); // k o l p from row 2, 3
const __m128 vtmp4 = _mm_unpacklo_ps(v4, v5); // a e b f from row 0, 1
const __m128 vtmp5 = _mm_unpacklo_ps(v6, v7); // i m j n from row 2, 3
const __m128 vtmp6 = _mm_unpackhi_ps(v4, v5); // c g d h from row 0, 1
const __m128 vtmp7 = _mm_unpackhi_ps(v6, v7); // k o l p from row 2, 3
// Transpose 4x4
v0 = _mm_movelh_ps(vtmp0, vtmp1); // a e i m from row 0, 1
v1 = _mm_movehl_ps(vtmp1, vtmp0); // b f j n from row 0, 1
v2 = _mm_movelh_ps(vtmp2, vtmp3); // c g k o from row 2, 3
v3 = _mm_movehl_ps(vtmp3, vtmp2); // d h l p from row 2, 3
v4 = _mm_movelh_ps(vtmp4, vtmp5); // a e i m from row 0, 1
v5 = _mm_movehl_ps(vtmp5, vtmp4); // b f j n from row 0, 1
v6 = _mm_movelh_ps(vtmp6, vtmp7); // c g k o from row 2, 3
v7 = _mm_movehl_ps(vtmp7, vtmp6); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0);
_mm_store_ps(packed_w + 4, v4);
_mm_store_ps(packed_w + 8, v1);
_mm_store_ps(packed_w + 12, v5);
_mm_store_ps(packed_w + 16, v2);
_mm_store_ps(packed_w + 20, v6);
_mm_store_ps(packed_w + 24, v3);
_mm_store_ps(packed_w + 28, v7);
packed_w += 32;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (8 - n);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
packed_w += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
// Read blocks of 4x4
// a b c d
// e f g h
// i j k l
// m n o p
__m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
__m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
__m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
__m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
__m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
__m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
__m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
__m128 v7x0123 = _mm_undefined_ps();
// Apply SR4 shuffle
v1x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v2x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v3x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3x0123), _MM_SHUFFLE(2, 1, 0, 3)));
v5x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v6x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6x0123), _MM_SHUFFLE(1, 0, 3, 2)));
// Transpose 2x2
const __m128 vtmp0x0123 = _mm_unpacklo_ps(v0x0123, v1x0123); // a e b f from row 0, 1
const __m128 vtmp1x0123 = _mm_unpacklo_ps(v2x0123, v3x0123); // i m j n from row 2, 3
const __m128 vtmp2x0123 = _mm_unpackhi_ps(v0x0123, v1x0123); // c g d h from row 0, 1
const __m128 vtmp3x0123 = _mm_unpackhi_ps(v2x0123, v3x0123); // k o l p from row 2, 3
const __m128 vtmp4x0123 = _mm_unpacklo_ps(v4x0123, v5x0123); // a e b f from row 0, 1
const __m128 vtmp5x0123 = _mm_unpacklo_ps(v6x0123, v6x0123); // i m j n from row 2, 3
const __m128 vtmp6x0123 = _mm_unpackhi_ps(v4x0123, v5x0123); // c g d h from row 0, 1
const __m128 vtmp7x0123 = _mm_unpackhi_ps(v6x0123, v6x0123); // k o l p from row 2, 3
xnn_prefetch_to_l1((const int8_t*) w0 + 128);
xnn_prefetch_to_l1((const int8_t*) w1 + 128);
xnn_prefetch_to_l1((const int8_t*) w2 + 128);
xnn_prefetch_to_l1((const int8_t*) w3 + 128);
xnn_prefetch_to_l1((const int8_t*) w4 + 128);
xnn_prefetch_to_l1((const int8_t*) w5 + 128);
xnn_prefetch_to_l1((const int8_t*) w6 + 128);
// Transpose 4x4
v0x0123 = _mm_movelh_ps(vtmp0x0123, vtmp1x0123); // a e i m from row 0, 1
v1x0123 = _mm_movehl_ps(vtmp1x0123, vtmp0x0123); // b f j n from row 0, 1
v2x0123 = _mm_movelh_ps(vtmp2x0123, vtmp3x0123); // c g k o from row 2, 3
v3x0123 = _mm_movehl_ps(vtmp3x0123, vtmp2x0123); // d h l p from row 2, 3
v4x0123 = _mm_movelh_ps(vtmp4x0123, vtmp5x0123); // a e i m from row 0, 1
v5x0123 = _mm_movehl_ps(vtmp5x0123, vtmp4x0123); // b f j n from row 0, 1
v6x0123 = _mm_movelh_ps(vtmp6x0123, vtmp7x0123); // c g k o from row 2, 3
v7x0123 = _mm_movehl_ps(vtmp7x0123, vtmp6x0123); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0x0123);
_mm_store_ps(packed_w + 4, v4x0123);
_mm_store_ps(packed_w + 8, v1x0123);
_mm_store_ps(packed_w + 12, v5x0123);
_mm_store_ps(packed_w + 16, v2x0123);
_mm_store_ps(packed_w + 20, v6x0123);
_mm_store_ps(packed_w + 24, v3x0123);
_mm_store_ps(packed_w + 28, v7x0123);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
__m128 v0 = _mm_undefined_ps();
__m128 v1 = _mm_undefined_ps();
__m128 v2 = _mm_undefined_ps();
__m128 v3 = _mm_undefined_ps();
__m128 v4 = _mm_undefined_ps();
__m128 v5 = _mm_undefined_ps();
__m128 v6 = _mm_undefined_ps();
__m128 v7 = _mm_undefined_ps();
switch (k) {
case 1:
// Read blocks of 4x1
// a
// e
// i
// m
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
v2 = _mm_load_ss(w2);
w2 += 1;
v3 = _mm_load_ss(w3);
w3 += 1;
v4 = _mm_load_ss(w4);
w4 += 1;
v5 = _mm_load_ss(w5);
w5 += 1;
v6 = _mm_load_ss(w6);
w6 += 1;
break;
case 2:
// Read blocks of 4x2
// a b
// e f
// i j
// m n
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
v2 = _mm_castpd_ps(_mm_load_sd((const double*) w2));
w2 += 2;
v3 = _mm_castpd_ps(_mm_load_sd((const double*) w3));
w3 += 2;
v4 = _mm_castpd_ps(_mm_load_sd((const double*) w4));
w4 += 2;
v5 = _mm_castpd_ps(_mm_load_sd((const double*) w5));
w5 += 2;
v6 = _mm_castpd_ps(_mm_load_sd((const double*) w6));
w6 += 2;
break;
case 3:
{
// Read blocks of 4x3
// a b c
// e f g
// i j k
// m n o
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
const __m128 v2lo = _mm_castpd_ps(_mm_load_sd((const double*) w2));
const __m128 v2hi = _mm_load_ss(w2 + 2);
v2 = _mm_movelh_ps(v2lo, v2hi);
w2 += 3;
const __m128 v3lo = _mm_castpd_ps(_mm_load_sd((const double*) w3));
const __m128 v3hi = _mm_load_ss(w3 + 2);
v3 = _mm_movelh_ps(v3lo, v3hi);
w3 += 3;
const __m128 v4lo = _mm_castpd_ps(_mm_load_sd((const double*) w4));
const __m128 v4hi = _mm_load_ss(w4 + 2);
v4 = _mm_movelh_ps(v4lo, v4hi);
w4 += 3;
const __m128 v5lo = _mm_castpd_ps(_mm_load_sd((const double*) w5));
const __m128 v5hi = _mm_load_ss(w5 + 2);
v5 = _mm_movelh_ps(v5lo, v5hi);
w5 += 3;
const __m128 v6lo = _mm_castpd_ps(_mm_load_sd((const double*) w6));
const __m128 v6hi = _mm_load_ss(w6 + 2);
v6 = _mm_movelh_ps(v6lo, v6hi);
w6 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
// Apply SR4 shuffle
v1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1), _MM_SHUFFLE(0, 3, 2, 1)));
v2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2), _MM_SHUFFLE(1, 0, 3, 2)));
v3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3), _MM_SHUFFLE(2, 1, 0, 3)));
v5 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5), _MM_SHUFFLE(0, 3, 2, 1)));
v6 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6), _MM_SHUFFLE(1, 0, 3, 2)));
// Transpose 2x2
const __m128 vtmp0 = _mm_unpacklo_ps(v0, v1); // a e b f from row 0, 1
const __m128 vtmp1 = _mm_unpacklo_ps(v2, v3); // i m j n from row 2, 3
const __m128 vtmp2 = _mm_unpackhi_ps(v0, v1); // c g d h from row 0, 1
const __m128 vtmp3 = _mm_unpackhi_ps(v2, v3); // k o l p from row 2, 3
const __m128 vtmp4 = _mm_unpacklo_ps(v4, v5); // a e b f from row 0, 1
const __m128 vtmp5 = _mm_unpacklo_ps(v6, v6); // i m j n from row 2, 3
const __m128 vtmp6 = _mm_unpackhi_ps(v4, v5); // c g d h from row 0, 1
const __m128 vtmp7 = _mm_unpackhi_ps(v6, v6); // k o l p from row 2, 3
// Transpose 4x4
v0 = _mm_movelh_ps(vtmp0, vtmp1); // a e i m from row 0, 1
v1 = _mm_movehl_ps(vtmp1, vtmp0); // b f j n from row 0, 1
v2 = _mm_movelh_ps(vtmp2, vtmp3); // c g k o from row 2, 3
v3 = _mm_movehl_ps(vtmp3, vtmp2); // d h l p from row 2, 3
v4 = _mm_movelh_ps(vtmp4, vtmp5); // a e i m from row 0, 1
v5 = _mm_movehl_ps(vtmp5, vtmp4); // b f j n from row 0, 1
v6 = _mm_movelh_ps(vtmp6, vtmp7); // c g k o from row 2, 3
v7 = _mm_movehl_ps(vtmp7, vtmp6); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0);
_mm_store_ps(packed_w + 4, v4);
_mm_store_ps(packed_w + 8, v1);
_mm_store_ps(packed_w + 12, v5);
_mm_store_ps(packed_w + 16, v2);
_mm_store_ps(packed_w + 20, v6);
_mm_store_ps(packed_w + 24, v3);
_mm_store_ps(packed_w + 28, v7);
packed_w += 32;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
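The x8s4 variants differ from the plain x8 kernels only in the "SR4 shuffle": before the 4x4 transpose, row r of each 8-row tile rotates every aligned group of 4 consecutive k-values left by r % 4 lanes (e.g. _MM_SHUFFLE(0, 3, 2, 1) on row 1 selects lanes {1, 2, 3, 0}, a rotate-left by one). The scalar model below is a sketch of that rotation alone, with all names local to the sketch; it is not how the kernel is implemented, only what the shuffle computes.
#include <stddef.h>
// Scalar model of the SR4 pre-rotation: within each aligned group of
// 4 k-values, rotate row r left by (r % 4) lanes. A tail of kc % 4
// elements is left untouched here; the kernel applies the same shuffle
// to a padded register in its own KC-remainder path.
static void sr4_rotate_row(float* row, size_t kc, size_t r) {
  const size_t shift = r % 4;
  for (size_t k = 0; k + 4 <= kc; k += 4) {
    float tmp[4];
    for (size_t i = 0; i < 4; i++) {
      tmp[i] = row[k + (i + shift) % 4];  // lane i takes source lane (i + shift) % 4
    }
    for (size_t i = 0; i < 4; i++) {
      row[k + i] = tmp[i];
    }
  }
}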
| 22,316
| 39.429348
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packw/gen/x32-packw-x8s4-gemm-goi-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/s4-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 4);
assert(weights != NULL);
assert(packed_weights != NULL);
const float* b = (const float*) bias;
float* packed_w = (float*) packed_weights;
do {
// NC main loop multiple of 8
const float* w0 = (const float*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(b != NULL) {
const __m128 vb0 = _mm_loadu_ps(b);
const __m128 vb4 = _mm_loadu_ps(b + 4);
_mm_store_ps(packed_w, vb0);
_mm_store_ps(packed_w + 4, vb4);
b += 8;
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
}
packed_w += 8;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
const float* w4 = w3 + kc;
const float* w5 = w4 + kc;
const float* w6 = w5 + kc;
const float* w7 = w6 + kc;
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
// Read blocks of 4x4
// a b c d
// e f g h
// i j k l
// m n o p
__m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
__m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
__m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
__m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
__m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
__m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
__m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
__m128 v7x0123 = _mm_loadu_ps(w7);
w7 += 4;
// Apply SR4 shuffle
v1x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v2x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v3x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3x0123), _MM_SHUFFLE(2, 1, 0, 3)));
v5x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v6x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v7x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v7x0123), _MM_SHUFFLE(2, 1, 0, 3)));
// Transpose 2x2
const __m128 vtmp0x0123 = _mm_unpacklo_ps(v0x0123, v1x0123); // a e b f from row 0, 1
const __m128 vtmp1x0123 = _mm_unpacklo_ps(v2x0123, v3x0123); // i m j n from row 2, 3
const __m128 vtmp2x0123 = _mm_unpackhi_ps(v0x0123, v1x0123); // c g d h from row 0, 1
const __m128 vtmp3x0123 = _mm_unpackhi_ps(v2x0123, v3x0123); // k o l p from row 2, 3
const __m128 vtmp4x0123 = _mm_unpacklo_ps(v4x0123, v5x0123); // a e b f from row 0, 1
const __m128 vtmp5x0123 = _mm_unpacklo_ps(v6x0123, v7x0123); // i m j n from row 2, 3
const __m128 vtmp6x0123 = _mm_unpackhi_ps(v4x0123, v5x0123); // c g d h from row 0, 1
const __m128 vtmp7x0123 = _mm_unpackhi_ps(v6x0123, v7x0123); // k o l p from row 2, 3
// Transpose 4x4
v0x0123 = _mm_movelh_ps(vtmp0x0123, vtmp1x0123); // a e i m from row 0, 1
v1x0123 = _mm_movehl_ps(vtmp1x0123, vtmp0x0123); // b f j n from row 0, 1
v2x0123 = _mm_movelh_ps(vtmp2x0123, vtmp3x0123); // c g k o from row 2, 3
v3x0123 = _mm_movehl_ps(vtmp3x0123, vtmp2x0123); // d h l p from row 2, 3
v4x0123 = _mm_movelh_ps(vtmp4x0123, vtmp5x0123); // a e i m from row 0, 1
v5x0123 = _mm_movehl_ps(vtmp5x0123, vtmp4x0123); // b f j n from row 0, 1
v6x0123 = _mm_movelh_ps(vtmp6x0123, vtmp7x0123); // c g k o from row 2, 3
v7x0123 = _mm_movehl_ps(vtmp7x0123, vtmp6x0123); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0x0123);
_mm_store_ps(packed_w + 4, v4x0123);
_mm_store_ps(packed_w + 8, v1x0123);
_mm_store_ps(packed_w + 12, v5x0123);
_mm_store_ps(packed_w + 16, v2x0123);
_mm_store_ps(packed_w + 20, v6x0123);
_mm_store_ps(packed_w + 24, v3x0123);
_mm_store_ps(packed_w + 28, v7x0123);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
__m128 v0 = _mm_undefined_ps();
__m128 v1 = _mm_undefined_ps();
__m128 v2 = _mm_undefined_ps();
__m128 v3 = _mm_undefined_ps();
__m128 v4 = _mm_undefined_ps();
__m128 v5 = _mm_undefined_ps();
__m128 v6 = _mm_undefined_ps();
__m128 v7 = _mm_undefined_ps();
switch (k) {
case 1:
// Read blocks of 4x1
// a
// e
// i
// m
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
v2 = _mm_load_ss(w2);
w2 += 1;
v3 = _mm_load_ss(w3);
w3 += 1;
v4 = _mm_load_ss(w4);
w4 += 1;
v5 = _mm_load_ss(w5);
w5 += 1;
v6 = _mm_load_ss(w6);
w6 += 1;
v7 = _mm_load_ss(w7);
w7 += 1;
break;
case 2:
// Read blocks of 4x2
// a b
// e f
// i j
// m n
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
v2 = _mm_castpd_ps(_mm_load_sd((const double*) w2));
w2 += 2;
v3 = _mm_castpd_ps(_mm_load_sd((const double*) w3));
w3 += 2;
v4 = _mm_castpd_ps(_mm_load_sd((const double*) w4));
w4 += 2;
v5 = _mm_castpd_ps(_mm_load_sd((const double*) w5));
w5 += 2;
v6 = _mm_castpd_ps(_mm_load_sd((const double*) w6));
w6 += 2;
v7 = _mm_castpd_ps(_mm_load_sd((const double*) w7));
w7 += 2;
break;
case 3:
{
// Read blocks of 4x3
// a b c
// e f g
// i j k
// m n o
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
const __m128 v2lo = _mm_castpd_ps(_mm_load_sd((const double*) w2));
const __m128 v2hi = _mm_load_ss(w2 + 2);
v2 = _mm_movelh_ps(v2lo, v2hi);
w2 += 3;
const __m128 v3lo = _mm_castpd_ps(_mm_load_sd((const double*) w3));
const __m128 v3hi = _mm_load_ss(w3 + 2);
v3 = _mm_movelh_ps(v3lo, v3hi);
w3 += 3;
const __m128 v4lo = _mm_castpd_ps(_mm_load_sd((const double*) w4));
const __m128 v4hi = _mm_load_ss(w4 + 2);
v4 = _mm_movelh_ps(v4lo, v4hi);
w4 += 3;
const __m128 v5lo = _mm_castpd_ps(_mm_load_sd((const double*) w5));
const __m128 v5hi = _mm_load_ss(w5 + 2);
v5 = _mm_movelh_ps(v5lo, v5hi);
w5 += 3;
const __m128 v6lo = _mm_castpd_ps(_mm_load_sd((const double*) w6));
const __m128 v6hi = _mm_load_ss(w6 + 2);
v6 = _mm_movelh_ps(v6lo, v6hi);
w6 += 3;
const __m128 v7lo = _mm_castpd_ps(_mm_load_sd((const double*) w7));
const __m128 v7hi = _mm_load_ss(w7 + 2);
v7 = _mm_movelh_ps(v7lo, v7hi);
w7 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
// Apply SR4 shuffle
v1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1), _MM_SHUFFLE(0, 3, 2, 1)));
v2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2), _MM_SHUFFLE(1, 0, 3, 2)));
v3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3), _MM_SHUFFLE(2, 1, 0, 3)));
v5 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5), _MM_SHUFFLE(0, 3, 2, 1)));
v6 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6), _MM_SHUFFLE(1, 0, 3, 2)));
v7 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v7), _MM_SHUFFLE(2, 1, 0, 3)));
// Transpose 2x2
const __m128 vtmp0 = _mm_unpacklo_ps(v0, v1); // a e b f from row 0, 1
const __m128 vtmp1 = _mm_unpacklo_ps(v2, v3); // i m j n from row 2, 3
const __m128 vtmp2 = _mm_unpackhi_ps(v0, v1); // c g d h from row 0, 1
const __m128 vtmp3 = _mm_unpackhi_ps(v2, v3); // k o l p from row 2, 3
const __m128 vtmp4 = _mm_unpacklo_ps(v4, v5); // a e b f from row 0, 1
const __m128 vtmp5 = _mm_unpacklo_ps(v6, v7); // i m j n from row 2, 3
const __m128 vtmp6 = _mm_unpackhi_ps(v4, v5); // c g d h from row 0, 1
const __m128 vtmp7 = _mm_unpackhi_ps(v6, v7); // k o l p from row 2, 3
// Transpose 4x4
v0 = _mm_movelh_ps(vtmp0, vtmp1); // a e i m from row 0, 1
v1 = _mm_movehl_ps(vtmp1, vtmp0); // b f j n from row 0, 1
v2 = _mm_movelh_ps(vtmp2, vtmp3); // c g k o from row 2, 3
v3 = _mm_movehl_ps(vtmp3, vtmp2); // d h l p from row 2, 3
v4 = _mm_movelh_ps(vtmp4, vtmp5); // a e i m from row 0, 1
v5 = _mm_movehl_ps(vtmp5, vtmp4); // b f j n from row 0, 1
v6 = _mm_movelh_ps(vtmp6, vtmp7); // c g k o from row 2, 3
v7 = _mm_movehl_ps(vtmp7, vtmp6); // d h l p from row 2, 3
_mm_store_ps(packed_w, v0);
_mm_store_ps(packed_w + 4, v4);
_mm_store_ps(packed_w + 8, v1);
_mm_store_ps(packed_w + 12, v5);
_mm_store_ps(packed_w + 16, v2);
_mm_store_ps(packed_w + 20, v6);
_mm_store_ps(packed_w + 24, v3);
_mm_store_ps(packed_w + 28, v7);
packed_w += 32;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*packed_w++ = *b++;
} while (--nb != 0);
packed_w += (8 - n);
} else {
const __m128 vzero = _mm_setzero_ps();
_mm_store_ps(packed_w, vzero);
_mm_store_ps(packed_w + 4, vzero);
packed_w += 8;
}
// NR remainder has fewer than 8 rows, so the last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const float* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const float* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const float* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const float* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
size_t k = kc;
// KC multiple of 4
for (; k >= 4; k -= 4) {
// Read blocks of 4x4
// a b c d
// e f g h
// i j k l
// m n o p
__m128 v0x0123 = _mm_loadu_ps(w0);
w0 += 4;
__m128 v1x0123 = _mm_loadu_ps(w1);
w1 += 4;
__m128 v2x0123 = _mm_loadu_ps(w2);
w2 += 4;
__m128 v3x0123 = _mm_loadu_ps(w3);
w3 += 4;
__m128 v4x0123 = _mm_loadu_ps(w4);
w4 += 4;
__m128 v5x0123 = _mm_loadu_ps(w5);
w5 += 4;
__m128 v6x0123 = _mm_loadu_ps(w6);
w6 += 4;
__m128 v7x0123 = _mm_undefined_ps();
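// Row 7 does not exist in this NC remainder; v7x0123 is only ever written by the transpose below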
// Apply SR4 shuffle
v1x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v2x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2x0123), _MM_SHUFFLE(1, 0, 3, 2)));
v3x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3x0123), _MM_SHUFFLE(2, 1, 0, 3)));
v5x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5x0123), _MM_SHUFFLE(0, 3, 2, 1)));
v6x0123 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6x0123), _MM_SHUFFLE(1, 0, 3, 2)));
// Transpose 2x2
const __m128 vtmp0x0123 = _mm_unpacklo_ps(v0x0123, v1x0123); // a e b f from rows 0, 1
const __m128 vtmp1x0123 = _mm_unpacklo_ps(v2x0123, v3x0123); // i m j n from rows 2, 3
const __m128 vtmp2x0123 = _mm_unpackhi_ps(v0x0123, v1x0123); // c g d h from rows 0, 1
const __m128 vtmp3x0123 = _mm_unpackhi_ps(v2x0123, v3x0123); // k o l p from rows 2, 3
const __m128 vtmp4x0123 = _mm_unpacklo_ps(v4x0123, v5x0123); // a e b f from rows 4, 5
const __m128 vtmp5x0123 = _mm_unpacklo_ps(v6x0123, v6x0123); // i i j j from row 6 duplicated (row 7 is absent)
const __m128 vtmp6x0123 = _mm_unpackhi_ps(v4x0123, v5x0123); // c g d h from rows 4, 5
const __m128 vtmp7x0123 = _mm_unpackhi_ps(v6x0123, v6x0123); // k k l l from row 6 duplicated (row 7 is absent)
// Transpose 4x4
v0x0123 = _mm_movelh_ps(vtmp0x0123, vtmp1x0123); // a e i m from rows 0-3
v1x0123 = _mm_movehl_ps(vtmp1x0123, vtmp0x0123); // b f j n from rows 0-3
v2x0123 = _mm_movelh_ps(vtmp2x0123, vtmp3x0123); // c g k o from rows 0-3
v3x0123 = _mm_movehl_ps(vtmp3x0123, vtmp2x0123); // d h l p from rows 0-3
v4x0123 = _mm_movelh_ps(vtmp4x0123, vtmp5x0123); // a e i i from rows 4-6 (row-7 lane is padding)
v5x0123 = _mm_movehl_ps(vtmp5x0123, vtmp4x0123); // b f j j from rows 4-6 (row-7 lane is padding)
v6x0123 = _mm_movelh_ps(vtmp6x0123, vtmp7x0123); // c g k k from rows 4-6 (row-7 lane is padding)
v7x0123 = _mm_movehl_ps(vtmp7x0123, vtmp6x0123); // d h l l from rows 4-6 (row-7 lane is padding)
_mm_store_ps(packed_w, v0x0123);
_mm_store_ps(packed_w + 4, v4x0123);
_mm_store_ps(packed_w + 8, v1x0123);
_mm_store_ps(packed_w + 12, v5x0123);
_mm_store_ps(packed_w + 16, v2x0123);
_mm_store_ps(packed_w + 20, v6x0123);
_mm_store_ps(packed_w + 24, v3x0123);
_mm_store_ps(packed_w + 28, v7x0123);
packed_w += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
__m128 v0 = _mm_undefined_ps();
__m128 v1 = _mm_undefined_ps();
__m128 v2 = _mm_undefined_ps();
__m128 v3 = _mm_undefined_ps();
__m128 v4 = _mm_undefined_ps();
__m128 v5 = _mm_undefined_ps();
__m128 v6 = _mm_undefined_ps();
__m128 v7 = _mm_undefined_ps();
switch (k) {
case 1:
// Read blocks of 4x1
// a
// e
// i
// m
v0 = _mm_load_ss(w0);
w0 += 1;
v1 = _mm_load_ss(w1);
w1 += 1;
v2 = _mm_load_ss(w2);
w2 += 1;
v3 = _mm_load_ss(w3);
w3 += 1;
v4 = _mm_load_ss(w4);
w4 += 1;
v5 = _mm_load_ss(w5);
w5 += 1;
v6 = _mm_load_ss(w6);
w6 += 1;
break;
case 2:
// Read blocks of 4x2
// a b
// e f
// i j
// m n
v0 = _mm_castpd_ps(_mm_load_sd((const double*) w0));
w0 += 2;
v1 = _mm_castpd_ps(_mm_load_sd((const double*) w1));
w1 += 2;
v2 = _mm_castpd_ps(_mm_load_sd((const double*) w2));
w2 += 2;
v3 = _mm_castpd_ps(_mm_load_sd((const double*) w3));
w3 += 2;
v4 = _mm_castpd_ps(_mm_load_sd((const double*) w4));
w4 += 2;
v5 = _mm_castpd_ps(_mm_load_sd((const double*) w5));
w5 += 2;
v6 = _mm_castpd_ps(_mm_load_sd((const double*) w6));
w6 += 2;
break;
case 3:
{
// Read blocks of 4x3
// a b c
// e f g
// i j k
// m n o
const __m128 v0lo = _mm_castpd_ps(_mm_load_sd((const double*) w0));
const __m128 v0hi = _mm_load_ss(w0 + 2);
v0 = _mm_movelh_ps(v0lo, v0hi);
w0 += 3;
const __m128 v1lo = _mm_castpd_ps(_mm_load_sd((const double*) w1));
const __m128 v1hi = _mm_load_ss(w1 + 2);
v1 = _mm_movelh_ps(v1lo, v1hi);
w1 += 3;
const __m128 v2lo = _mm_castpd_ps(_mm_load_sd((const double*) w2));
const __m128 v2hi = _mm_load_ss(w2 + 2);
v2 = _mm_movelh_ps(v2lo, v2hi);
w2 += 3;
const __m128 v3lo = _mm_castpd_ps(_mm_load_sd((const double*) w3));
const __m128 v3hi = _mm_load_ss(w3 + 2);
v3 = _mm_movelh_ps(v3lo, v3hi);
w3 += 3;
const __m128 v4lo = _mm_castpd_ps(_mm_load_sd((const double*) w4));
const __m128 v4hi = _mm_load_ss(w4 + 2);
v4 = _mm_movelh_ps(v4lo, v4hi);
w4 += 3;
const __m128 v5lo = _mm_castpd_ps(_mm_load_sd((const double*) w5));
const __m128 v5hi = _mm_load_ss(w5 + 2);
v5 = _mm_movelh_ps(v5lo, v5hi);
w5 += 3;
const __m128 v6lo = _mm_castpd_ps(_mm_load_sd((const double*) w6));
const __m128 v6hi = _mm_load_ss(w6 + 2);
v6 = _mm_movelh_ps(v6lo, v6hi);
w6 += 3;
break;
}
default:
XNN_UNREACHABLE;
}
// Apply SR4 shuffle
v1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v1), _MM_SHUFFLE(0, 3, 2, 1)));
v2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v2), _MM_SHUFFLE(1, 0, 3, 2)));
v3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v3), _MM_SHUFFLE(2, 1, 0, 3)));
v5 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v5), _MM_SHUFFLE(0, 3, 2, 1)));
v6 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v6), _MM_SHUFFLE(1, 0, 3, 2)));
// Transpose 2x2
const __m128 vtmp0 = _mm_unpacklo_ps(v0, v1); // a e b f from rows 0, 1
const __m128 vtmp1 = _mm_unpacklo_ps(v2, v3); // i m j n from rows 2, 3
const __m128 vtmp2 = _mm_unpackhi_ps(v0, v1); // c g d h from rows 0, 1
const __m128 vtmp3 = _mm_unpackhi_ps(v2, v3); // k o l p from rows 2, 3
const __m128 vtmp4 = _mm_unpacklo_ps(v4, v5); // a e b f from rows 4, 5
const __m128 vtmp5 = _mm_unpacklo_ps(v6, v6); // i i j j from row 6 duplicated (row 7 is absent)
const __m128 vtmp6 = _mm_unpackhi_ps(v4, v5); // c g d h from rows 4, 5
const __m128 vtmp7 = _mm_unpackhi_ps(v6, v6); // k k l l from row 6 duplicated (row 7 is absent)
// Transpose 4x4
v0 = _mm_movelh_ps(vtmp0, vtmp1); // a e i m from rows 0-3
v1 = _mm_movehl_ps(vtmp1, vtmp0); // b f j n from rows 0-3
v2 = _mm_movelh_ps(vtmp2, vtmp3); // c g k o from rows 0-3
v3 = _mm_movehl_ps(vtmp3, vtmp2); // d h l p from rows 0-3
v4 = _mm_movelh_ps(vtmp4, vtmp5); // a e i i from rows 4-6 (row-7 lane is padding)
v5 = _mm_movehl_ps(vtmp5, vtmp4); // b f j j from rows 4-6 (row-7 lane is padding)
v6 = _mm_movelh_ps(vtmp6, vtmp7); // c g k k from rows 4-6 (row-7 lane is padding)
v7 = _mm_movehl_ps(vtmp7, vtmp6); // d h l l from rows 4-6 (row-7 lane is padding)
_mm_store_ps(packed_w, v0);
_mm_store_ps(packed_w + 4, v4);
_mm_store_ps(packed_w + 8, v1);
_mm_store_ps(packed_w + 12, v5);
_mm_store_ps(packed_w + 16, v2);
_mm_store_ps(packed_w + 20, v6);
_mm_store_ps(packed_w + 24, v3);
_mm_store_ps(packed_w + 28, v7);
packed_w += 32;
}
packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 20,695
| 38.8
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packw/gen/x32-packw-x8s4-gemm-goi-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/s4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/packw.h>
void xnn_x32_packw_gemm_goi_ukernel_x8s4__wasmsimd_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 8);
assert(kr == 1);
assert(sr == 4);
assert(weights != NULL);
assert(packed_weights != NULL);
do {
// NC main loop multiple of 8
const uint32_t* w0 = (const uint32_t*) weights;
size_t n = nc;
for (; n >= 8; n -= 8) {
if XNN_LIKELY(bias != NULL) {
const v128_t vb0123 = wasm_v128_load(bias);
const v128_t vb4567 = wasm_v128_load(bias + 4);
bias += 8;
wasm_v128_store(packed_weights, vb0123);
wasm_v128_store(packed_weights + 4, vb4567);
} else {
const v128_t vzero = wasm_i32x4_const_splat(0);
wasm_v128_store(packed_weights, vzero);
wasm_v128_store(packed_weights + 4, vzero);
}
packed_weights += 8;
const uint32_t* w1 = w0 + kc;
const uint32_t* w2 = w1 + kc;
const uint32_t* w3 = w2 + kc;
const uint32_t* w4 = w3 + kc;
const uint32_t* w5 = w4 + kc;
const uint32_t* w6 = w5 + kc;
const uint32_t* w7 = w6 + kc;
// KC main loop multiple of 4: packs an 8-row x 4-column block per iteration
size_t k = kc;
for (; k >= 4; k -= 4) {
v128_t v0x0123 = wasm_v128_load(w0);
w0 += 4;
v128_t v1x0123 = wasm_v128_load(w1);
w1 += 4;
v128_t v2x0123 = wasm_v128_load(w2);
w2 += 4;
v128_t v3x0123 = wasm_v128_load(w3);
w3 += 4;
v128_t v4x0123 = wasm_v128_load(w4);
w4 += 4;
v128_t v5x0123 = wasm_v128_load(w5);
w5 += 4;
v128_t v6x0123 = wasm_v128_load(w6);
w6 += 4;
v128_t v7x0123 = wasm_v128_load(w7);
w7 += 4;
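// Apply SR4 shuffle: rotate row r left by (r mod 4) 32-bit lanes before interleaving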
v1x0123 = wasm_v32x4_shuffle(v1x0123, v1x0123, 1, 2, 3, 0);
v2x0123 = wasm_v32x4_shuffle(v2x0123, v2x0123, 2, 3, 0, 1);
v3x0123 = wasm_v32x4_shuffle(v3x0123, v3x0123, 3, 0, 1, 2);
v5x0123 = wasm_v32x4_shuffle(v5x0123, v5x0123, 1, 2, 3, 0);
v6x0123 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 3, 0, 1);
v7x0123 = wasm_v32x4_shuffle(v7x0123, v7x0123, 3, 0, 1, 2);
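// Transpose both 4x4 halves: interleave row pairs 32-bit-wise, then combine 64-bit halves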
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v7x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v7x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
v128_t v0x0123 = wasm_i32x4_const_splat(0);
v128_t v1x0123 = wasm_i32x4_const_splat(0);
v128_t v2x0123 = wasm_i32x4_const_splat(0);
v128_t v3x0123 = wasm_i32x4_const_splat(0);
v128_t v4x0123 = wasm_i32x4_const_splat(0);
v128_t v5x0123 = wasm_i32x4_const_splat(0);
v128_t v6x0123 = wasm_i32x4_const_splat(0);
v128_t v7x0123 = wasm_i32x4_const_splat(0);
switch (k) {
case 1:
v0x0123 = wasm_v128_load32_zero(w0);
w0 += 1;
v1x0123 = wasm_v128_load32_zero(w1);
w1 += 1;
v2x0123 = wasm_v128_load32_zero(w2);
w2 += 1;
v3x0123 = wasm_v128_load32_zero(w3);
w3 += 1;
v4x0123 = wasm_v128_load32_zero(w4);
w4 += 1;
v5x0123 = wasm_v128_load32_zero(w5);
w5 += 1;
v6x0123 = wasm_v128_load32_zero(w6);
w6 += 1;
v7x0123 = wasm_v128_load32_zero(w7);
w7 += 1;
break;
case 2:
v0x0123 = wasm_v128_load64_zero(w0);
w0 += 2;
v1x0123 = wasm_v128_load64_zero(w1);
w1 += 2;
v2x0123 = wasm_v128_load64_zero(w2);
w2 += 2;
v3x0123 = wasm_v128_load64_zero(w3);
w3 += 2;
v4x0123 = wasm_v128_load64_zero(w4);
w4 += 2;
v5x0123 = wasm_v128_load64_zero(w5);
w5 += 2;
v6x0123 = wasm_v128_load64_zero(w6);
w6 += 2;
v7x0123 = wasm_v128_load64_zero(w7);
w7 += 2;
break;
case 3:
v0x0123 = wasm_v128_load64_zero(w0);
w0 += 2;
v1x0123 = wasm_v128_load64_zero(w1);
w1 += 2;
v2x0123 = wasm_v128_load64_zero(w2);
w2 += 2;
v3x0123 = wasm_v128_load64_zero(w3);
w3 += 2;
v4x0123 = wasm_v128_load64_zero(w4);
w4 += 2;
v5x0123 = wasm_v128_load64_zero(w5);
w5 += 2;
v6x0123 = wasm_v128_load64_zero(w6);
w6 += 2;
v7x0123 = wasm_v128_load64_zero(w7);
w7 += 2;
v0x0123 = wasm_v128_load32_lane(w0, v0x0123, 2);
w0 += 1;
v1x0123 = wasm_v128_load32_lane(w1, v1x0123, 2);
w1 += 1;
v2x0123 = wasm_v128_load32_lane(w2, v2x0123, 2);
w2 += 1;
v3x0123 = wasm_v128_load32_lane(w3, v3x0123, 2);
w3 += 1;
v4x0123 = wasm_v128_load32_lane(w4, v4x0123, 2);
w4 += 1;
v5x0123 = wasm_v128_load32_lane(w5, v5x0123, 2);
w5 += 1;
v6x0123 = wasm_v128_load32_lane(w6, v6x0123, 2);
w6 += 1;
v7x0123 = wasm_v128_load32_lane(w7, v7x0123, 2);
w7 += 1;
break;
default:
XNN_UNREACHABLE;
}
v1x0123 = wasm_v32x4_shuffle(v1x0123, v1x0123, 1, 2, 3, 0);
v2x0123 = wasm_v32x4_shuffle(v2x0123, v2x0123, 2, 3, 0, 1);
v3x0123 = wasm_v32x4_shuffle(v3x0123, v3x0123, 3, 0, 1, 2);
v5x0123 = wasm_v32x4_shuffle(v5x0123, v5x0123, 1, 2, 3, 0);
v6x0123 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 3, 0, 1);
v7x0123 = wasm_v32x4_shuffle(v7x0123, v7x0123, 3, 0, 1, 2);
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v7x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v7x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w7;
}
// NC remainder (1..7)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 7);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (8 - n);
} else {
const v128_t vzero = wasm_i32x4_const_splat(0);
wasm_v128_store(packed_weights, vzero);
wasm_v128_store(packed_weights + 4, vzero);
packed_weights += 8;
}
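// NC remainder packs fewer than 8 rows, so w7 is never materialized; duplicated pointers keep all loads in bounds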
const uint32_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint32_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint32_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint32_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint32_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint32_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
size_t k = kc;
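// KC main loop multiple of 4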
for (; k >= 4; k -= 4) {
v128_t v0x0123 = wasm_v128_load(w0);
w0 += 4;
v128_t v1x0123 = wasm_v128_load(w1);
w1 += 4;
v128_t v2x0123 = wasm_v128_load(w2);
w2 += 4;
v128_t v3x0123 = wasm_v128_load(w3);
w3 += 4;
v128_t v4x0123 = wasm_v128_load(w4);
w4 += 4;
v128_t v5x0123 = wasm_v128_load(w5);
w5 += 4;
v128_t v6x0123 = wasm_v128_load(w6);
w6 += 4;
v1x0123 = wasm_v32x4_shuffle(v1x0123, v1x0123, 1, 2, 3, 0);
v2x0123 = wasm_v32x4_shuffle(v2x0123, v2x0123, 2, 3, 0, 1);
v3x0123 = wasm_v32x4_shuffle(v3x0123, v3x0123, 3, 0, 1, 2);
v5x0123 = wasm_v32x4_shuffle(v5x0123, v5x0123, 1, 2, 3, 0);
v6x0123 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 3, 0, 1);
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v6x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
// KC remainder (1..3)
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
v128_t v0x0123 = wasm_i32x4_const_splat(0);
v128_t v1x0123 = wasm_i32x4_const_splat(0);
v128_t v2x0123 = wasm_i32x4_const_splat(0);
v128_t v3x0123 = wasm_i32x4_const_splat(0);
v128_t v4x0123 = wasm_i32x4_const_splat(0);
v128_t v5x0123 = wasm_i32x4_const_splat(0);
v128_t v6x0123 = wasm_i32x4_const_splat(0);
switch (k) {
case 1:
v0x0123 = wasm_v128_load32_zero(w0);
w0 += 1;
v1x0123 = wasm_v128_load32_zero(w1);
w1 += 1;
v2x0123 = wasm_v128_load32_zero(w2);
w2 += 1;
v3x0123 = wasm_v128_load32_zero(w3);
w3 += 1;
v4x0123 = wasm_v128_load32_zero(w4);
w4 += 1;
v5x0123 = wasm_v128_load32_zero(w5);
w5 += 1;
v6x0123 = wasm_v128_load32_zero(w6);
w6 += 1;
break;
case 2:
v0x0123 = wasm_v128_load64_zero(w0);
w0 += 2;
v1x0123 = wasm_v128_load64_zero(w1);
w1 += 2;
v2x0123 = wasm_v128_load64_zero(w2);
w2 += 2;
v3x0123 = wasm_v128_load64_zero(w3);
w3 += 2;
v4x0123 = wasm_v128_load64_zero(w4);
w4 += 2;
v5x0123 = wasm_v128_load64_zero(w5);
w5 += 2;
v6x0123 = wasm_v128_load64_zero(w6);
w6 += 2;
break;
case 3:
v0x0123 = wasm_v128_load64_zero(w0);
w0 += 2;
v1x0123 = wasm_v128_load64_zero(w1);
w1 += 2;
v2x0123 = wasm_v128_load64_zero(w2);
w2 += 2;
v3x0123 = wasm_v128_load64_zero(w3);
w3 += 2;
v4x0123 = wasm_v128_load64_zero(w4);
w4 += 2;
v5x0123 = wasm_v128_load64_zero(w5);
w5 += 2;
v6x0123 = wasm_v128_load64_zero(w6);
w6 += 2;
v0x0123 = wasm_v128_load32_lane(w0, v0x0123, 2);
w0 += 1;
v1x0123 = wasm_v128_load32_lane(w1, v1x0123, 2);
w1 += 1;
v2x0123 = wasm_v128_load32_lane(w2, v2x0123, 2);
w2 += 1;
v3x0123 = wasm_v128_load32_lane(w3, v3x0123, 2);
w3 += 1;
v4x0123 = wasm_v128_load32_lane(w4, v4x0123, 2);
w4 += 1;
v5x0123 = wasm_v128_load32_lane(w5, v5x0123, 2);
w5 += 1;
v6x0123 = wasm_v128_load32_lane(w6, v6x0123, 2);
w6 += 1;
break;
default:
XNN_UNREACHABLE;
}
v1x0123 = wasm_v32x4_shuffle(v1x0123, v1x0123, 1, 2, 3, 0);
v2x0123 = wasm_v32x4_shuffle(v2x0123, v2x0123, 2, 3, 0, 1);
v3x0123 = wasm_v32x4_shuffle(v3x0123, v3x0123, 3, 0, 1, 2);
v5x0123 = wasm_v32x4_shuffle(v5x0123, v5x0123, 1, 2, 3, 0);
v6x0123 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 3, 0, 1);
const v128_t v01x0_01x1 = wasm_v32x4_shuffle(v0x0123, v1x0123, 0, 4, 1, 5);
const v128_t v23x0_23x1 = wasm_v32x4_shuffle(v2x0123, v3x0123, 0, 4, 1, 5);
const v128_t v01x2_01x3 = wasm_v32x4_shuffle(v0x0123, v1x0123, 2, 6, 3, 7);
const v128_t v23x2_23x3 = wasm_v32x4_shuffle(v2x0123, v3x0123, 2, 6, 3, 7);
const v128_t v45x0_45x1 = wasm_v32x4_shuffle(v4x0123, v5x0123, 0, 4, 1, 5);
const v128_t v67x0_67x1 = wasm_v32x4_shuffle(v6x0123, v6x0123, 0, 4, 1, 5);
const v128_t v45x2_45x3 = wasm_v32x4_shuffle(v4x0123, v5x0123, 2, 6, 3, 7);
const v128_t v67x2_67x3 = wasm_v32x4_shuffle(v6x0123, v6x0123, 2, 6, 3, 7);
const v128_t v0123x0 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 0, 2);
const v128_t v0123x1 = wasm_v64x2_shuffle(v01x0_01x1, v23x0_23x1, 1, 3);
const v128_t v0123x2 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 0, 2);
const v128_t v0123x3 = wasm_v64x2_shuffle(v01x2_01x3, v23x2_23x3, 1, 3);
const v128_t v4567x0 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 0, 2);
const v128_t v4567x1 = wasm_v64x2_shuffle(v45x0_45x1, v67x0_67x1, 1, 3);
const v128_t v4567x2 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 0, 2);
const v128_t v4567x3 = wasm_v64x2_shuffle(v45x2_45x3, v67x2_67x3, 1, 3);
wasm_v128_store(packed_weights, v0123x0);
wasm_v128_store(packed_weights + 4, v4567x0);
wasm_v128_store(packed_weights + 8, v0123x1);
wasm_v128_store(packed_weights + 12, v4567x1);
wasm_v128_store(packed_weights + 16, v0123x2);
wasm_v128_store(packed_weights + 20, v4567x2);
wasm_v128_store(packed_weights + 24, v0123x3);
wasm_v128_store(packed_weights + 28, v4567x3);
packed_weights += 32;
}
packed_weights = (uint32_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 18,543
| 38.623932
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/x32-packx-4x-sse.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_4x__sse(
size_t m,
size_t k,
const uint32_t* restrict x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(k != 0);
const float* x0 = (const float*) x;
const float* x1 = (const float*) ((uintptr_t) x0 + x_stride);
if (m < 2) {
x1 = x0;
}
const float* x2 = (const float*) ((uintptr_t) x1 + x_stride);
if (m <= 2) {
x2 = x1;
}
const float* x3 = (const float*) ((uintptr_t) x2 + x_stride);
if (m != 4) {
x3 = x2;
}
float* restrict y_f32 = (float*) y;
for (; k >= 4; k -= 4) {
const __m128 vx0 = _mm_loadu_ps(x0);
x0 += 4;
const __m128 vx1 = _mm_loadu_ps(x1);
x1 += 4;
const __m128 vx2 = _mm_loadu_ps(x2);
x2 += 4;
const __m128 vx3 = _mm_loadu_ps(x3);
x3 += 4;
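// Transpose the 4x4 tile: unpack 32-bit pairs, then combine 64-bit halves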
const __m128 vt0 = _mm_unpacklo_ps(vx0, vx1);
const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1);
const __m128 vt2 = _mm_unpacklo_ps(vx2, vx3);
const __m128 vt3 = _mm_unpackhi_ps(vx2, vx3);
const __m128 vy0 = _mm_movelh_ps(vt0, vt2);
_mm_store_ps(y_f32, vy0);
const __m128 vy1 = _mm_movehl_ps(vt2, vt0);
_mm_store_ps(y_f32 + 4, vy1);
const __m128 vy2 = _mm_movelh_ps(vt1, vt3);
_mm_store_ps(y_f32 + 8, vy2);
const __m128 vy3 = _mm_movehl_ps(vt3, vt1);
_mm_store_ps(y_f32 + 12, vy3);
y_f32 += 16;
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 vx0 = _mm_load_ss(x0);
x0 += 1;
const __m128 vx1 = _mm_load_ss(x1);
x1 += 1;
const __m128 vx2 = _mm_load_ss(x2);
x2 += 1;
const __m128 vx3 = _mm_load_ss(x3);
x3 += 1;
const __m128 vx01 = _mm_unpacklo_ps(vx0, vx1);
const __m128 vx23 = _mm_unpacklo_ps(vx2, vx3);
const __m128 vy = _mm_movelh_ps(vx01, vx23);
_mm_store_ps(y_f32, vy);
y_f32 += 4;
} while (--k != 0);
}
}
| 2,087
| 22.727273
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/x32-packx-4x-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_4x__wasmsimd(
size_t m,
size_t k,
const uint32_t* restrict x_ptr,
size_t x_stride,
uint32_t* restrict y_ptr)
{
assert(m != 0);
assert(k != 0);
const float* x0 = (const float*) x_ptr;
const float* x1 = (const float*) ((uintptr_t) x0 + x_stride);
if (m < 2) {
x1 = x0;
}
const float* x2 = (const float*) ((uintptr_t) x1 + x_stride);
if (m <= 2) {
x2 = x1;
}
const float* x3 = (const float*) ((uintptr_t) x2 + x_stride);
if (m != 4) {
x3 = x2;
}
float* y = (float*) y_ptr;
for (; k >= 4; k -= 4) {
const v128_t vx0 = wasm_v128_load(x0);
x0 += 4;
const v128_t vx1 = wasm_v128_load(x1);
x1 += 4;
const v128_t vx2 = wasm_v128_load(x2);
x2 += 4;
const v128_t vx3 = wasm_v128_load(x3);
x3 += 4;
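// Transpose the 4x4 tile with 32-bit and 64-bit shuffles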
const v128_t vt0 = wasm_v32x4_shuffle(vx0, vx1, 0, 4, 1, 5);
const v128_t vt1 = wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7);
const v128_t vt2 = wasm_v32x4_shuffle(vx2, vx3, 0, 4, 1, 5);
const v128_t vt3 = wasm_v32x4_shuffle(vx2, vx3, 2, 6, 3, 7);
const v128_t vy0 = wasm_v32x4_shuffle(vt0, vt2, 0, 1, 4, 5);
wasm_v128_store(y, vy0);
const v128_t vy1 = wasm_v32x4_shuffle(vt0, vt2, 2, 3, 6, 7);
wasm_v128_store(y + 4, vy1);
const v128_t vy2 = wasm_v32x4_shuffle(vt1, vt3, 0, 1, 4, 5);
wasm_v128_store(y + 8, vy2);
const v128_t vy3 = wasm_v32x4_shuffle(vt1, vt3, 2, 3, 6, 7);
wasm_v128_store(y + 12, vy3);
y += 16;
}
if XNN_UNLIKELY(k != 0) {
do {
const float vx0 = *x0++;
const float vx1 = *x1++;
const float vx2 = *x2++;
const float vx3 = *x3++;
y[0] = vx0;
y[1] = vx1;
y[2] = vx2;
y[3] = vx3;
y += 4;
} while (--k != 0);
}
}
| 1,999
| 23.691358
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-4x-neon-st4-x4-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packx_ukernel_4x__neon_st4_x4_prfm(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 4);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m != 4) {
x3 = x2;
}
for (; k >= 4; k -= 4) {
uint32x4x4_t vx0123x0123;
vx0123x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx0123x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx0123x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx0123x0123.val[3] = vld1q_u32(x3); x3 += 4;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
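// vst4q_u32 interleaves the four rows lane by lane, storing the transposed 4x4 tile in a single instruction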
vst4q_u32(y, vx0123x0123); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
vst1q_u32(y, vt0123); y += 4;
} while (--k != 0);
}
}
| 2,077
| 27.081081
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-4x-neon-st4-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_4x__neon_st4_x4(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 4);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m != 4) {
x3 = x2;
}
for (; k >= 4; k -= 4) {
uint32x4x4_t vx0123x0123;
vx0123x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx0123x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx0123x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx0123x0123.val[3] = vld1q_u32(x3); x3 += 4;
vst4q_u32(y, vx0123x0123); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vst1q_u32(y, vt0123); y += 4;
} while (--k != 0);
}
}
| 1,634
| 24.153846
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-4x-neon-st4-x8-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packx_ukernel_4x__neon_st4_x8_prfm(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 4);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m != 4) {
x3 = x2;
}
for (; k >= 8; k -= 8) {
uint32x4x4_t vx0123x0123;
uint32x4x4_t vx4567x0123;
vx0123x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx0123x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx0123x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx0123x0123.val[3] = vld1q_u32(x3); x3 += 4;
vx4567x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx4567x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx4567x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx4567x0123.val[3] = vld1q_u32(x3); x3 += 4;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
vst4q_u32(y, vx0123x0123); y += 16;
vst4q_u32(y, vx4567x0123); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
vst1q_u32(y, vt0123); y += 4;
} while (--k != 0);
}
}
| 2,343
| 28.3
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-4x-neon-st4-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_4x__neon_st4_x8(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 4);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m != 4) {
x3 = x2;
}
for (; k >= 8; k -= 8) {
uint32x4x4_t vx0123x0123;
uint32x4x4_t vx4567x0123;
vx0123x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx0123x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx0123x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx0123x0123.val[3] = vld1q_u32(x3); x3 += 4;
vx4567x0123.val[0] = vld1q_u32(x0); x0 += 4;
vx4567x0123.val[1] = vld1q_u32(x1); x1 += 4;
vx4567x0123.val[2] = vld1q_u32(x2); x2 += 4;
vx4567x0123.val[3] = vld1q_u32(x3); x3 += 4;
vst4q_u32(y, vx0123x0123); y += 16;
vst4q_u32(y, vx4567x0123); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vst1q_u32(y, vt0123); y += 4;
} while (--k != 0);
}
}
| 1,900
| 25.774648
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-8x-neon-st4-x4-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packx_ukernel_8x__neon_st4_x4_prfm(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 8);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m < 4) {
x3 = x2;
}
const uint32_t* x4 = (const uint32_t*) ((uintptr_t) x3 + x_stride);
if XNN_UNPREDICTABLE(m <= 4) {
x4 = x3;
}
const uint32_t* x5 = (const uint32_t*) ((uintptr_t) x4 + x_stride);
if XNN_UNPREDICTABLE(m < 6) {
x5 = x4;
}
const uint32_t* x6 = (const uint32_t*) ((uintptr_t) x5 + x_stride);
if XNN_UNPREDICTABLE(m <= 6) {
x6 = x5;
}
const uint32_t* x7 = (const uint32_t*) ((uintptr_t) x6 + x_stride);
if XNN_UNPREDICTABLE(m != 8) {
x7 = x6;
}
for (; k >= 4; k -= 4) {
const uint32x4_t vx0123x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx0123x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx0123x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx0123x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx0123x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx0123x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx0123x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx0123x7 = vld1q_u32(x7); x7 += 4;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
xnn_prefetch_to_l1((const int8_t*) x4 + 128);
xnn_prefetch_to_l1((const int8_t*) x5 + 128);
xnn_prefetch_to_l1((const int8_t*) x6 + 128);
xnn_prefetch_to_l1((const int8_t*) x7 + 128);
const uint32x4x2_t vz0123x0 = vzipq_u32(vx0123x0, vx0123x4);
const uint32x4x2_t vz0123x1 = vzipq_u32(vx0123x1, vx0123x5);
const uint32x4x2_t vz0123x2 = vzipq_u32(vx0123x2, vx0123x6);
const uint32x4x2_t vz0123x3 = vzipq_u32(vx0123x3, vx0123x7);
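// Zip row r with row r+4 so the vst4q stores below emit full 8-element columns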
const uint32x4x4_t vy0123x0 = { vz0123x0.val[0], vz0123x1.val[0], vz0123x2.val[0], vz0123x3.val[0] };
const uint32x4x4_t vy0123x1 = { vz0123x0.val[1], vz0123x1.val[1], vz0123x2.val[1], vz0123x3.val[1] };
vst4q_u32(y, vy0123x0); y += 16;
vst4q_u32(y, vy0123x1); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
uint32x4_t vt4567 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vt4567 = vld1q_lane_u32(x4, vt4567, 0); x4 += 1;
vt4567 = vld1q_lane_u32(x5, vt4567, 1); x5 += 1;
vt4567 = vld1q_lane_u32(x6, vt4567, 2); x6 += 1;
vt4567 = vld1q_lane_u32(x7, vt4567, 3); x7 += 1;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
xnn_prefetch_to_l1((const int8_t*) x4 + 128);
xnn_prefetch_to_l1((const int8_t*) x5 + 128);
xnn_prefetch_to_l1((const int8_t*) x6 + 128);
xnn_prefetch_to_l1((const int8_t*) x7 + 128);
vst1q_u32(y, vt0123); y += 4;
vst1q_u32(y, vt4567); y += 4;
} while (--k != 0);
}
}
| 3,988
| 33.686957
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-8x-neon-st4-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_8x__neon_st4_x4(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 8);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m < 4) {
x3 = x2;
}
const uint32_t* x4 = (const uint32_t*) ((uintptr_t) x3 + x_stride);
if XNN_UNPREDICTABLE(m <= 4) {
x4 = x3;
}
const uint32_t* x5 = (const uint32_t*) ((uintptr_t) x4 + x_stride);
if XNN_UNPREDICTABLE(m < 6) {
x5 = x4;
}
const uint32_t* x6 = (const uint32_t*) ((uintptr_t) x5 + x_stride);
if XNN_UNPREDICTABLE(m <= 6) {
x6 = x5;
}
const uint32_t* x7 = (const uint32_t*) ((uintptr_t) x6 + x_stride);
if XNN_UNPREDICTABLE(m != 8) {
x7 = x6;
}
for (; k >= 4; k -= 4) {
const uint32x4_t vx0123x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx0123x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx0123x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx0123x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx0123x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx0123x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx0123x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx0123x7 = vld1q_u32(x7); x7 += 4;
const uint32x4x2_t vz0123x0 = vzipq_u32(vx0123x0, vx0123x4);
const uint32x4x2_t vz0123x1 = vzipq_u32(vx0123x1, vx0123x5);
const uint32x4x2_t vz0123x2 = vzipq_u32(vx0123x2, vx0123x6);
const uint32x4x2_t vz0123x3 = vzipq_u32(vx0123x3, vx0123x7);
const uint32x4x4_t vy0123x0 = { vz0123x0.val[0], vz0123x1.val[0], vz0123x2.val[0], vz0123x3.val[0] };
const uint32x4x4_t vy0123x1 = { vz0123x0.val[1], vz0123x1.val[1], vz0123x2.val[1], vz0123x3.val[1] };
vst4q_u32(y, vy0123x0); y += 16;
vst4q_u32(y, vy0123x1); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
uint32x4_t vt4567 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vt4567 = vld1q_lane_u32(x4, vt4567, 0); x4 += 1;
vt4567 = vld1q_lane_u32(x5, vt4567, 1); x5 += 1;
vt4567 = vld1q_lane_u32(x6, vt4567, 2); x6 += 1;
vt4567 = vld1q_lane_u32(x7, vt4567, 3); x7 += 1;
vst1q_u32(y, vt0123); y += 4;
vst1q_u32(y, vt4567); y += 4;
} while (--k != 0);
}
}
| 3,137
| 31.020408
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-8x-neon-st4-x8-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
#include <xnnpack/prefetch.h>
void xnn_x32_packx_ukernel_8x__neon_st4_x8_prfm(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 8);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m < 4) {
x3 = x2;
}
const uint32_t* x4 = (const uint32_t*) ((uintptr_t) x3 + x_stride);
if XNN_UNPREDICTABLE(m <= 4) {
x4 = x3;
}
const uint32_t* x5 = (const uint32_t*) ((uintptr_t) x4 + x_stride);
if XNN_UNPREDICTABLE(m < 6) {
x5 = x4;
}
const uint32_t* x6 = (const uint32_t*) ((uintptr_t) x5 + x_stride);
if XNN_UNPREDICTABLE(m <= 6) {
x6 = x5;
}
const uint32_t* x7 = (const uint32_t*) ((uintptr_t) x6 + x_stride);
if XNN_UNPREDICTABLE(m != 8) {
x7 = x6;
}
for (; k >= 8; k -= 8) {
const uint32x4_t vx0123x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx0123x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx0123x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx0123x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx0123x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx0123x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx0123x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx0123x7 = vld1q_u32(x7); x7 += 4;
const uint32x4_t vx4567x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx4567x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx4567x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx4567x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx4567x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx4567x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx4567x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx4567x7 = vld1q_u32(x7); x7 += 4;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
xnn_prefetch_to_l1((const int8_t*) x4 + 128);
xnn_prefetch_to_l1((const int8_t*) x5 + 128);
xnn_prefetch_to_l1((const int8_t*) x6 + 128);
xnn_prefetch_to_l1((const int8_t*) x7 + 128);
const uint32x4x2_t vz0123x0 = vzipq_u32(vx0123x0, vx0123x4);
const uint32x4x2_t vz0123x1 = vzipq_u32(vx0123x1, vx0123x5);
const uint32x4x2_t vz0123x2 = vzipq_u32(vx0123x2, vx0123x6);
const uint32x4x2_t vz0123x3 = vzipq_u32(vx0123x3, vx0123x7);
const uint32x4x2_t vz4567x0 = vzipq_u32(vx4567x0, vx4567x4);
const uint32x4x2_t vz4567x1 = vzipq_u32(vx4567x1, vx4567x5);
const uint32x4x2_t vz4567x2 = vzipq_u32(vx4567x2, vx4567x6);
const uint32x4x2_t vz4567x3 = vzipq_u32(vx4567x3, vx4567x7);
const uint32x4x4_t vy0123x0 = { vz0123x0.val[0], vz0123x1.val[0], vz0123x2.val[0], vz0123x3.val[0] };
const uint32x4x4_t vy0123x1 = { vz0123x0.val[1], vz0123x1.val[1], vz0123x2.val[1], vz0123x3.val[1] };
const uint32x4x4_t vy4567x0 = { vz4567x0.val[0], vz4567x1.val[0], vz4567x2.val[0], vz4567x3.val[0] };
const uint32x4x4_t vy4567x1 = { vz4567x0.val[1], vz4567x1.val[1], vz4567x2.val[1], vz4567x3.val[1] };
vst4q_u32(y, vy0123x0); y += 16;
vst4q_u32(y, vy0123x1); y += 16;
vst4q_u32(y, vy4567x0); y += 16;
vst4q_u32(y, vy4567x1); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
uint32x4_t vt4567 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vt4567 = vld1q_lane_u32(x4, vt4567, 0); x4 += 1;
vt4567 = vld1q_lane_u32(x5, vt4567, 1); x5 += 1;
vt4567 = vld1q_lane_u32(x6, vt4567, 2); x6 += 1;
vt4567 = vld1q_lane_u32(x7, vt4567, 3); x7 += 1;
xnn_prefetch_to_l1((const int8_t*) x0 + 128);
xnn_prefetch_to_l1((const int8_t*) x1 + 128);
xnn_prefetch_to_l1((const int8_t*) x2 + 128);
xnn_prefetch_to_l1((const int8_t*) x3 + 128);
xnn_prefetch_to_l1((const int8_t*) x4 + 128);
xnn_prefetch_to_l1((const int8_t*) x5 + 128);
xnn_prefetch_to_l1((const int8_t*) x6 + 128);
xnn_prefetch_to_l1((const int8_t*) x7 + 128);
vst1q_u32(y, vt0123); y += 4;
vst1q_u32(y, vt4567); y += 4;
} while (--k != 0);
}
}
| 4,982
| 37.038168
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-packx/gen/x32-packx-8x-neon-st4-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packx/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/packx.h>
void xnn_x32_packx_ukernel_8x__neon_st4_x8(
size_t m,
size_t k,
const uint32_t* x,
size_t x_stride,
uint32_t* restrict y)
{
assert(m != 0);
assert(m <= 8);
assert(k != 0);
assert(x != NULL);
assert(y != NULL);
const uint32_t* x0 = x;
const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride);
if XNN_UNPREDICTABLE(m < 2) {
x1 = x0;
}
const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride);
if XNN_UNPREDICTABLE(m <= 2) {
x2 = x1;
}
const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride);
if XNN_UNPREDICTABLE(m < 4) {
x3 = x2;
}
const uint32_t* x4 = (const uint32_t*) ((uintptr_t) x3 + x_stride);
if XNN_UNPREDICTABLE(m <= 4) {
x4 = x3;
}
const uint32_t* x5 = (const uint32_t*) ((uintptr_t) x4 + x_stride);
if XNN_UNPREDICTABLE(m < 6) {
x5 = x4;
}
const uint32_t* x6 = (const uint32_t*) ((uintptr_t) x5 + x_stride);
if XNN_UNPREDICTABLE(m <= 6) {
x6 = x5;
}
const uint32_t* x7 = (const uint32_t*) ((uintptr_t) x6 + x_stride);
if XNN_UNPREDICTABLE(m != 8) {
x7 = x6;
}
for (; k >= 8; k -= 8) {
const uint32x4_t vx0123x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx0123x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx0123x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx0123x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx0123x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx0123x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx0123x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx0123x7 = vld1q_u32(x7); x7 += 4;
const uint32x4_t vx4567x0 = vld1q_u32(x0); x0 += 4;
const uint32x4_t vx4567x1 = vld1q_u32(x1); x1 += 4;
const uint32x4_t vx4567x2 = vld1q_u32(x2); x2 += 4;
const uint32x4_t vx4567x3 = vld1q_u32(x3); x3 += 4;
const uint32x4_t vx4567x4 = vld1q_u32(x4); x4 += 4;
const uint32x4_t vx4567x5 = vld1q_u32(x5); x5 += 4;
const uint32x4_t vx4567x6 = vld1q_u32(x6); x6 += 4;
const uint32x4_t vx4567x7 = vld1q_u32(x7); x7 += 4;
const uint32x4x2_t vz0123x0 = vzipq_u32(vx0123x0, vx0123x4);
const uint32x4x2_t vz0123x1 = vzipq_u32(vx0123x1, vx0123x5);
const uint32x4x2_t vz0123x2 = vzipq_u32(vx0123x2, vx0123x6);
const uint32x4x2_t vz0123x3 = vzipq_u32(vx0123x3, vx0123x7);
const uint32x4x2_t vz4567x0 = vzipq_u32(vx4567x0, vx4567x4);
const uint32x4x2_t vz4567x1 = vzipq_u32(vx4567x1, vx4567x5);
const uint32x4x2_t vz4567x2 = vzipq_u32(vx4567x2, vx4567x6);
const uint32x4x2_t vz4567x3 = vzipq_u32(vx4567x3, vx4567x7);
const uint32x4x4_t vy0123x0 = { vz0123x0.val[0], vz0123x1.val[0], vz0123x2.val[0], vz0123x3.val[0] };
const uint32x4x4_t vy0123x1 = { vz0123x0.val[1], vz0123x1.val[1], vz0123x2.val[1], vz0123x3.val[1] };
const uint32x4x4_t vy4567x0 = { vz4567x0.val[0], vz4567x1.val[0], vz4567x2.val[0], vz4567x3.val[0] };
const uint32x4x4_t vy4567x1 = { vz4567x0.val[1], vz4567x1.val[1], vz4567x2.val[1], vz4567x3.val[1] };
vst4q_u32(y, vy0123x0); y += 16;
vst4q_u32(y, vy0123x1); y += 16;
vst4q_u32(y, vy4567x0); y += 16;
vst4q_u32(y, vy4567x1); y += 16;
}
if XNN_UNLIKELY(k != 0) {
uint32x4_t vt0123 = vdupq_n_u32(0);
uint32x4_t vt4567 = vdupq_n_u32(0);
do {
vt0123 = vld1q_lane_u32(x0, vt0123, 0); x0 += 1;
vt0123 = vld1q_lane_u32(x1, vt0123, 1); x1 += 1;
vt0123 = vld1q_lane_u32(x2, vt0123, 2); x2 += 1;
vt0123 = vld1q_lane_u32(x3, vt0123, 3); x3 += 1;
vt4567 = vld1q_lane_u32(x4, vt4567, 0); x4 += 1;
vt4567 = vld1q_lane_u32(x5, vt4567, 1); x5 += 1;
vt4567 = vld1q_lane_u32(x6, vt4567, 2); x6 += 1;
vt4567 = vld1q_lane_u32(x7, vt4567, 3); x7 += 1;
vst1q_u32(y, vt0123); y += 4;
vst1q_u32(y, vt4567); y += 4;
} while (--k != 0);
}
}
| 4,131
| 35.245614
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/x32-transposec-4x4-aarch64-neon-tbl128.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_aarch64_neon_tbl128(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(uint32_t);
const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_reset = tile_height * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const size_t tile_stride = tile_height * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
uint8_t* o2 = (uint8_t*) ((uintptr_t) o1 + output_stride);
uint8_t* o3 = (uint8_t*) ((uintptr_t) o2 + output_stride);
const uint8x16_t vperm0 = vld1q_u8(params->neon_tbl128.pos0);
const uint8x16_t vperm1 = vld1q_u8(params->neon_tbl128.pos1);
const uint8x16_t vperm2 = vld1q_u8(params->neon_tbl128.pos2);
const uint8x16_t vperm3 = vld1q_u8(params->neon_tbl128.pos3);
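// Each vperm table gathers one output row's 16 bytes from the 4x16-byte input tile via vqtbl4q_u8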
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
uint8x16x4_t v;
v.val[0] = vld1q_u8(i0); i0 = (const uint8_t*) ((uintptr_t) i0 + tile_stride);
v.val[1] = vld1q_u8(i1); i1 = (const uint8_t*) ((uintptr_t) i1 + tile_stride);
v.val[2] = vld1q_u8(i2); i2 = (const uint8_t*) ((uintptr_t) i2 + tile_stride);
v.val[3] = vld1q_u8(i3); i3 = (const uint8_t*) ((uintptr_t) i3 + tile_stride);
uint8x16_t vres0 = vqtbl4q_u8(v, vperm0);
uint8x16_t vres1 = vqtbl4q_u8(v, vperm1);
uint8x16_t vres2 = vqtbl4q_u8(v, vperm2);
uint8x16_t vres3 = vqtbl4q_u8(v, vperm3);
vst1q_u8(o3, vres3); o3 = (uint8_t*) ((uintptr_t) o3 + tile_wbytes);
vst1q_u8(o2, vres2); o2 = (uint8_t*) ((uintptr_t) o2 + tile_wbytes);
vst1q_u8(o1, vres1); o1 = (uint8_t*) ((uintptr_t) o1 + tile_wbytes);
vst1q_u8(o0, vres0); o0 = (uint8_t*) ((uintptr_t) o0 + tile_wbytes);
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
uint8x16x4_t v;
v.val[0] = vld1q_u8(i0);
v.val[1] = vld1q_u8(i1);
v.val[2] = vld1q_u8(i2);
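// v.val[3] is deliberately left unset: with bh < 4 the lanes drawn from input row 3 are never stored below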
uint8x16_t vres0 = vqtbl4q_u8(v, vperm0);
uint8x16_t vres1 = vqtbl4q_u8(v, vperm1);
uint8x16_t vres2 = vqtbl4q_u8(v, vperm2);
uint8x16_t vres3 = vqtbl4q_u8(v, vperm3);
uint8x8_t vres0_low = vget_low_u8(vres0);
uint8x8_t vres1_low = vget_low_u8(vres1);
uint8x8_t vres2_low = vget_low_u8(vres2);
uint8x8_t vres3_low = vget_low_u8(vres3);
if (bh & 2) {
vst1_u8(o3, vres3_low); o3 += 8;
vst1_u8(o2, vres2_low); o2 += 8;
vst1_u8(o1, vres1_low); o1 += 8;
vst1_u8(o0, vres0_low); o0 += 8;
vres0_low = vget_high_u8(vres0);
vres1_low = vget_high_u8(vres1);
vres2_low = vget_high_u8(vres2);
vres3_low = vget_high_u8(vres3);
}
if (bh & 1) {
vst1_lane_u32((void*) o3, vreinterpret_u32_u8(vres3_low), 0);
vst1_lane_u32((void*) o2, vreinterpret_u32_u8(vres2_low), 0);
vst1_lane_u32((void*) o1, vreinterpret_u32_u8(vres1_low), 0);
vst1_lane_u32((void*) o0, vreinterpret_u32_u8(vres0_low), 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
o2 = (uint8_t*) ((uintptr_t) o2 + output_reset);
o3 = (uint8_t*) ((uintptr_t) o3 + output_reset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 4,818
| 37.246032
| 111
|
c
|
XNNPACK
|
XNNPACK-master/src/x32-transposec/x32-transposec-4x4-sse.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <xmmintrin.h>
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x32_transposec_ukernel__4x4_sse(
const uint32_t* input,
uint32_t* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_stride >= block_height * sizeof(uint32_t));
assert(input_stride >= block_width * sizeof(uint32_t));
const size_t tile_height = 4;
const size_t tile_width = 4;
const size_t tile_wbytes = tile_width * sizeof(float);
const size_t input_vreset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
const size_t output_vreset = tile_height * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t);
const size_t input_offset = tile_height * input_stride;
const float* i0 = (const float*) input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o0 = (float*) output;
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
if XNN_UNPREDICTABLE(block_width <= 2) {
o2 = o0;
}
if XNN_UNPREDICTABLE(block_width < 4) {
o3 = o0;
}
size_t bh = block_height;
for (; bh >= 4; bh -= 4) {
__m128 v0 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + input_offset);
__m128 v1 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
__m128 v2 = _mm_loadu_ps(i2);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
__m128 v3 = _mm_loadu_ps(i3);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
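// _MM_TRANSPOSE4_PS transposes the 4x4 tile of floats in registers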
_MM_TRANSPOSE4_PS(v0, v1, v2, v3);
_mm_storeu_ps(o3, v3);
o3 = (float*) ((uintptr_t) o3 + tile_wbytes);
_mm_storeu_ps(o2, v2);
o2 = (float*) ((uintptr_t) o2 + tile_wbytes);
_mm_storeu_ps(o1, v1);
o1 = (float*) ((uintptr_t) o1 + tile_wbytes);
_mm_storeu_ps(o0, v0);
o0 = (float*) ((uintptr_t) o0 + tile_wbytes);
}
if (bh != 0) {
if XNN_UNPREDICTABLE(bh <= 2) {
i2 = i0;
}
if XNN_UNPREDICTABLE(bh < 2) {
i1 = i0;
}
__m128 v0 = _mm_loadu_ps(i0);
__m128 v1 = _mm_loadu_ps(i1);
__m128 v2 = _mm_loadu_ps(i2);
__m128 v3 = _mm_setzero_ps();
_MM_TRANSPOSE4_PS(v0, v1, v2, v3);
if (bh & 2) {
_mm_storel_pi((__m64*) o3, v3);
o3 += 2;
_mm_storel_pi((__m64*) o2, v2);
o2 += 2;
_mm_storel_pi((__m64*) o1, v1);
o1 += 2;
_mm_storel_pi((__m64*) o0, v0);
o0 += 2;
v0 = _mm_movehl_ps(v0, v0);
v1 = _mm_movehl_ps(v1, v1);
v2 = _mm_movehl_ps(v2, v2);
v3 = _mm_movehl_ps(v3, v3);
}
if (bh & 1) {
_mm_store_ss(o3, v3);
_mm_store_ss(o2, v2);
_mm_store_ss(o1, v1);
_mm_store_ss(o0, v0);
}
}
i0 = (const float*) ((uintptr_t) i0 + input_vreset);
i1 = (const float*) ((uintptr_t) i0 + input_stride);
i2 = (const float*) ((uintptr_t) i1 + input_stride);
i3 = (const float*) ((uintptr_t) i2 + input_stride);
o0 = (float*) ((uintptr_t) o0 + output_vreset);
o1 = (float*) ((uintptr_t) o1 + output_vreset);
o2 = (float*) ((uintptr_t) o2 + output_vreset);
o3 = (float*) ((uintptr_t) o3 + output_vreset);
block_width = doz(block_width, tile_width);
} while (block_width != 0);
}
| 3,964
| 31.5
| 112
|
c
|