repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-neon-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(in[r][ch] * scale[ch] + bias[ch], min, max),
// two rows per pass, four channels per NEON vector.  Weights are packed per
// channel quad as 4 scales followed by 4 biases (w advances by 8 per quad).
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__neon_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // Fixed: '&params' had been corrupted into '&para;ms' (stray HTML entity),
  // which does not compile.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;

      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);

      const float32x4_t vbias0123 = vld1q_f32(w); w += 4;

      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full 4-lane loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const float32x4_t vscale0123 = vld1q_f32(w);

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + c);
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + c);

      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);

      const float32x4_t vbias0123 = vld1q_f32(w + 4);

      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        // Store the low pair, then move the high pair down for the last lane.
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,633
| 30.877193
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-neonfma-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(fma(scale[ch], in[r][ch], bias[ch]), min, max),
// two rows per pass, four channels per NEON vector, using fused multiply-add.
// Weights are packed per channel quad as 4 scales followed by 4 biases.
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__neonfma_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // Fixed: '&params' had been corrupted into '&para;ms' (stray HTML entity),
  // which does not compile.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;

      const float32x4_t vbias0123 = vld1q_f32(w); w += 4;

      // vfmaq_f32(a, b, c) computes a + b*c in a single rounding.
      vacc0x0123 = vfmaq_f32(vbias0123, vscale0123, vacc0x0123);
      vacc1x0123 = vfmaq_f32(vbias0123, vscale0123, vacc1x0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full 4-lane loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const float32x4_t vscale0123 = vld1q_f32(w);

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + c);
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + c);

      const float32x4_t vbias0123 = vld1q_f32(w + 4);

      vacc0x0123 = vfmaq_f32(vbias0123, vscale0123, vacc0x0123);
      vacc1x0123 = vfmaq_f32(vbias0123, vscale0123, vacc1x0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        // Store the low pair, then move the high pair down for the last lane.
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,468
| 30.536364
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-scalar-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Per-channel scale-and-bias with output clamping, two rows per pass:
//   out[r][ch] = min(max(in[r][ch] * scale[ch] + bias[ch], min), max)
// Weights hold 4 scales followed by 4 biases for every quad of channels.
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__scalar_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  // After a pass over one row pair, hop past the row the partner pointer covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 (redundant rewrites, never OOB).
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Bulk loop: one quad of channels per iteration.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      for (size_t k = 0; k < 4; k++) {
        const float scale = w[k];
        const float bias = w[k + 4];  // biases follow the 4 scales
        float acc0 = i0[k] * scale + bias;
        float acc1 = i1[k] * scale + bias;
        acc0 = math_max_f32(acc0, vmin);
        acc1 = math_max_f32(acc1, vmin);
        o0[k] = math_min_f32(acc0, vmax);
        o1[k] = math_min_f32(acc1, vmax);
      }
      i0 += 4;
      i1 += 4;
      o0 += 4;
      o1 += 4;
      w += 8;
    }
    // Tail: 1-3 channels, one element at a time.
    if XNN_UNLIKELY(c != 0) {
      do {
        const float scale = *w++;
        const float bias = w[3];  // matching bias sits 4 floats after its scale
        float acc0 = (*i0++) * scale + bias;
        float acc1 = (*i1++) * scale + bias;
        *o0++ = math_min_f32(math_max_f32(acc0, vmin), vmax);
        *o1++ = math_min_f32(math_max_f32(acc1, vmin), vmax);
        c -= sizeof(float);
      } while (c != 0);
    }
    // Step both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // saturating decrement by 2
  } while (rows != 0);
}
| 3,997
| 27.35461
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-sse-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(in[r][ch] * scale[ch] + bias[ch], min, max),
// two rows per pass, four channels per SSE vector.  Weights are packed per
// channel quad as 4 scales followed by 4 biases (w advances by 8 per quad).
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__sse_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // Aligned loads: params->sse.{min,max} hold the bound replicated per lane.
  const __m128 vmin = _mm_load_ps(params->sse.min);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const __m128 vscale0123 = _mm_load_ps(w);

      __m128 vacc0x0123 = _mm_loadu_ps(i0);
      i0 += 4;

      __m128 vacc1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

      vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
      vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);

      const __m128 vbias0123 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, vbias0123);
      vacc1x0123 = _mm_add_ps(vacc1x0123, vbias0123);

      vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
      vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);

      vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
      vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);

      _mm_storeu_ps(o0, vacc0x0123);
      o0 += 4;
      _mm_storeu_ps(o1, vacc1x0123);
      o1 += 4;

      w += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full 4-lane loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const __m128 vscale0123 = _mm_load_ps(w);

      __m128 vacc0x0123 = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      __m128 vacc1x0123 = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
      vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);

      const __m128 vbias0123 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, vbias0123);
      vacc1x0123 = _mm_add_ps(vacc1x0123, vbias0123);

      vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
      vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);

      vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
      vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);

      if (c & (2 * sizeof(float))) {
        // Store the low pair, then move the high pair down for the last lane.
        _mm_storel_pi((__m64*) o0, vacc0x0123);
        _mm_storel_pi((__m64*) o1, vacc1x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        _mm_store_ss(o0, vacc0x0123);
        _mm_store_ss(o1, vacc1x0123);

        o0 += 1;
        o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,633
| 27.84127
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-wasm-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Per-channel scale-and-bias with output clamping (WebAssembly scalar build),
// two rows per pass:
//   out[r][ch] = min(max(in[r][ch] * scale[ch] + bias[ch], min), max)
// Weights hold 4 scales followed by 4 biases for every quad of channels.
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__wasm_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  // After a pass over one row pair, hop past the row the partner pointer covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 (redundant rewrites, never OOB).
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Bulk loop: one quad of channels per iteration; the wasm min/max
    // builtins map directly to f32.min/f32.max instructions.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      for (size_t k = 0; k < 4; k++) {
        const float scale = w[k];
        const float bias = w[k + 4];  // biases follow the 4 scales
        float acc0 = i0[k] * scale + bias;
        float acc1 = i1[k] * scale + bias;
        acc0 = __builtin_wasm_max_f32(acc0, vmin);
        acc1 = __builtin_wasm_max_f32(acc1, vmin);
        o0[k] = __builtin_wasm_min_f32(acc0, vmax);
        o1[k] = __builtin_wasm_min_f32(acc1, vmax);
      }
      i0 += 4;
      i1 += 4;
      o0 += 4;
      o1 += 4;
      w += 8;
    }
    // Tail: 1-3 channels, one element at a time.
    if XNN_UNLIKELY(c != 0) {
      do {
        const float scale = *w++;
        const float bias = w[3];  // matching bias sits 4 floats after its scale
        float acc0 = (*i0++) * scale + bias;
        float acc1 = (*i1++) * scale + bias;
        acc0 = __builtin_wasm_max_f32(acc0, vmin);
        acc1 = __builtin_wasm_max_f32(acc1, vmin);
        *o0++ = __builtin_wasm_min_f32(acc0, vmax);
        *o1++ = __builtin_wasm_min_f32(acc1, vmax);
        c -= sizeof(float);
      } while (c != 0);
    }
    // Step both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // saturating decrement by 2
  } while (rows != 0);
}
| 4,195
| 28.758865
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-wasmrelaxedsimd-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(in[r][ch] * scale[ch] + bias[ch], min, max),
// two rows per pass, four channels per 128-bit vector, using relaxed-SIMD
// min/max.  Weights are packed per channel quad as 4 scales then 4 biases.
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmrelaxedsimd_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // 64-bit splat broadcasts the bound to all lanes (params presumably store
  // each bound as a pair of identical floats — layout defined in xnnpack params).
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);

      v128_t vacc0x0123 = wasm_v128_load(i0);
      i0 += 4;

      v128_t vacc1x0123 = wasm_v128_load(i1);
      i1 += 4;

      const v128_t vbias0123 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);

      // Relaxed min/max: lane results for NaN inputs are implementation-defined,
      // trading strict IEEE semantics for speed.
      vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);

      vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);

      wasm_v128_store(o0, vacc0x0123);
      o0 += 4;
      wasm_v128_store(o1, vacc1x0123);
      o1 += 4;

      w += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full-width loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const v128_t vbias = wasm_v128_load(w + 4);

      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);

      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);

      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);

      if (c & (2 * sizeof(float))) {
        // Write the low two lanes, then shift lanes 2-3 down for the last store.
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);

        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,700
| 30.10084
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-wasmrelaxedsimd-fma-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(madd(scale[ch], in[r][ch], bias[ch]), min, max),
// two rows per pass, four channels per 128-bit vector, using relaxed-SIMD
// fused multiply-add and min/max.  Weights are packed per channel quad as
// 4 scales then 4 biases.  'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmrelaxedsimd_fma_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // 64-bit splat broadcasts the bound to all lanes (params presumably store
  // each bound as a pair of identical floats — layout defined in xnnpack params).
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);

      v128_t vacc0x0123 = wasm_v128_load(i0);
      i0 += 4;

      v128_t vacc1x0123 = wasm_v128_load(i1);
      i1 += 4;

      const v128_t vbias0123 = wasm_v128_load(w + 4);

      // relaxed_madd(a, b, c) computes a*b + c; fusion is implementation-defined.
      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vscale0123, vacc0x0123, vbias0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vscale0123, vacc1x0123, vbias0123);

      vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);

      vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);

      wasm_v128_store(o0, vacc0x0123);
      o0 += 4;
      wasm_v128_store(o1, vacc1x0123);
      o1 += 4;

      w += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full-width loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const v128_t vbias = wasm_v128_load(w + 4);

      vacc0 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc0, vbias);
      vacc1 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc1, vbias);

      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);

      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);

      if (c & (2 * sizeof(float))) {
        // Write the low two lanes, then shift lanes 2-3 down for the last store.
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);

        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,716
| 30.235294
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-wasmsimd-arm-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(in[r][ch] * scale[ch] + bias[ch], min, max),
// two rows per pass, four channels per 128-bit vector.  The "arm" variant
// uses the fully-specified wasm f32x4.min/max ops, which lower well on ARM.
// Weights are packed per channel quad as 4 scales then 4 biases.
// 'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_arm_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // 64-bit splat broadcasts the bound to all lanes (params presumably store
  // each bound as a pair of identical floats — layout defined in xnnpack params).
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);

      v128_t vacc0x0123 = wasm_v128_load(i0);
      i0 += 4;

      v128_t vacc1x0123 = wasm_v128_load(i1);
      i1 += 4;

      const v128_t vbias0123 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);

      vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
      vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);

      vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
      vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);

      wasm_v128_store(o0, vacc0x0123);
      o0 += 4;
      wasm_v128_store(o1, vacc1x0123);
      o1 += 4;

      w += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full-width loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const v128_t vbias = wasm_v128_load(w + 4);

      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);

      vacc0 = wasm_f32x4_max(vmin, vacc0);
      vacc1 = wasm_f32x4_max(vmin, vacc1);

      vacc0 = wasm_f32x4_min(vmax, vacc0);
      vacc1 = wasm_f32x4_min(vmax, vacc1);

      if (c & (2 * sizeof(float))) {
        // Write the low two lanes, then shift lanes 2-3 down for the last store.
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);

        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,553
| 28.865546
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c4-minmax-wasmsimd-x86-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes out[r][ch] = clamp(in[r][ch] * scale[ch] + bias[ch], min, max),
// two rows per pass, four channels per 128-bit vector.  The "x86" variant
// uses pseudo-min/max (pmax(a,b) = a < b ? b : a), which lower to single
// minps/maxps instructions on x86.  Weights are packed per channel quad as
// 4 scales then 4 biases.  'channels' is given in BYTES, not elements.
void xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_x86_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  // Row 1 trails row 0 by one byte stride.
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Per-pass hop: skip the row the partner pointer already covered.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  // 64-bit splat broadcasts the bound to all lanes (params presumably store
  // each bound as a pair of identical floats — layout defined in xnnpack params).
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      // Odd final row: alias row 1 onto row 0 so no out-of-bounds row is touched.
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: full quads of channels.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);

      v128_t vacc0x0123 = wasm_v128_load(i0);
      i0 += 4;

      v128_t vacc1x0123 = wasm_v128_load(i1);
      i1 += 4;

      const v128_t vbias0123 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);

      vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
      vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);

      vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
      vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);

      wasm_v128_store(o0, vacc0x0123);
      o0 += 4;
      wasm_v128_store(o1, vacc1x0123);
      o1 += 4;

      w += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // 1-3 leftover channels: full-width loads may read past the row tail
      // (sanctioned by XNN_OOB_READS); stores below write exactly c bytes.
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const v128_t vbias = wasm_v128_load(w + 4);

      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);

      vacc0 = wasm_f32x4_pmax(vmin, vacc0);
      vacc1 = wasm_f32x4_pmax(vmin, vacc1);

      vacc0 = wasm_f32x4_pmin(vmax, vacc0);
      vacc1 = wasm_f32x4_pmin(vmax, vacc1);

      if (c & (2 * sizeof(float))) {
        // Write the low two lanes, then shift lanes 2-3 down for the last store.
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);

        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance both row pointers past the pair of rows just written.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz: difference-or-zero (saturating decrement by 2)
  } while (rows != 0);
}
| 3,561
| 28.932773
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-neon-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using plain NEON
// multiply + add (no FMA). Weights are packed as groups of 8 scales followed
// by the matching 8 biases. XNN_OOB_READS: partial channel tiles still load
// full 4-float vectors, so the kernel may read (never write) a few bytes past
// the end of each row.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__neon_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;
      const float32x4_t vscale4567 = vld1q_f32(w); w += 4;
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc0x4567 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;
      float32x4_t vacc1x4567 = vld1q_f32(i1); i1 += 4;
      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
      vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
      const float32x4_t vbias0123 = vld1q_f32(w); w += 4;
      const float32x4_t vbias4567 = vld1q_f32(w); w += 4;
      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc0x4567 = vaddq_f32(vacc0x4567, vbias4567);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);
      vacc1x4567 = vaddq_f32(vacc1x4567, vbias4567);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc0x4567 = vminq_f32(vacc0x4567, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vacc1x4567 = vminq_f32(vacc1x4567, vmax);
      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o0, vacc0x4567); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
      vst1q_f32(o1, vacc1x4567); o1 += 4;
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;
      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
      // Bias sits 8 floats after the start of its scale group; w already advanced by 4.
      const float32x4_t vbias0123 = vld1q_f32(w + 4);
      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vscale0123 = vld1q_f32(w);
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + c);
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + c);
      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
      // Scale at w, matching bias 8 floats later.
      const float32x4_t vbias0123 = vld1q_f32(w + 8);
      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;
        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 5,137
| 33.02649
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-neonfma-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using NEON fused
// multiply-add (vfmaq_f32). Weights are packed as groups of 8 scales followed
// by the matching 8 biases. XNN_OOB_READS: partial channel tiles still load
// full 4-float vectors, so the kernel may read (never write) past the row end.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__neonfma_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows; bias is the FMA accumulator.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;
      const float32x4_t vscale4567 = vld1q_f32(w); w += 4;
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc0x4567 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;
      float32x4_t vacc1x4567 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vbias0123 = vld1q_f32(w); w += 4;
      const float32x4_t vbias4567 = vld1q_f32(w); w += 4;
      vacc0x0123 = vfmaq_f32(vbias0123, vscale0123, vacc0x0123);
      vacc0x4567 = vfmaq_f32(vbias4567, vscale4567, vacc0x4567);
      vacc1x0123 = vfmaq_f32(vbias0123, vscale0123, vacc1x0123);
      vacc1x4567 = vfmaq_f32(vbias4567, vscale4567, vacc1x4567);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc0x4567 = vminq_f32(vacc0x4567, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vacc1x4567 = vminq_f32(vacc1x4567, vmax);
      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o0, vacc0x4567); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
      vst1q_f32(o1, vacc1x4567); o1 += 4;
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;
      // Bias sits 8 floats after the start of its scale group; w already advanced by 4.
      const float32x4_t vbias0123 = vld1q_f32(w + 4);
      vacc0x0123 = vfmaq_f32(vbias0123, vscale0123, vacc0x0123);
      vacc1x0123 = vfmaq_f32(vbias0123, vscale0123, vacc1x0123);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vscale0123 = vld1q_f32(w);
      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + c);
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + c);
      // Scale at w, matching bias 8 floats later.
      const float32x4_t vbias0123 = vld1q_f32(w + 8);
      vacc0x0123 = vfmaq_f32(vbias0123, vscale0123, vacc0x0123);
      vacc1x0123 = vfmaq_f32(vbias0123, vscale0123, vacc1x0123);
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;
        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 4,804
| 32.601399
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-sse-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using SSE.
// Weights are packed as groups of 8 scales followed by the matching 8 biases
// (w advances by 16 per 8-channel group). XNN_OOB_READS: partial channel
// tiles still load full 4-float vectors, so the kernel may read (never write)
// past the end of each row.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__sse_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const __m128 vmin = _mm_load_ps(params->sse.min);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const __m128 vscale0123 = _mm_load_ps(w);
      const __m128 vscale4567 = _mm_load_ps(w + 4);
      __m128 vacc0x0123 = _mm_loadu_ps(i0);
      __m128 vacc0x4567 = _mm_loadu_ps(i0 + 4);
      i0 += 8;
      __m128 vacc1x0123 = _mm_loadu_ps(i1);
      __m128 vacc1x4567 = _mm_loadu_ps(i1 + 4);
      i1 += 8;
      vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
      vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
      vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
      vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
      const __m128 vbias0123 = _mm_load_ps(w + 8);
      const __m128 vbias4567 = _mm_load_ps(w + 12);
      vacc0x0123 = _mm_add_ps(vacc0x0123, vbias0123);
      vacc0x4567 = _mm_add_ps(vacc0x4567, vbias4567);
      vacc1x0123 = _mm_add_ps(vacc1x0123, vbias0123);
      vacc1x4567 = _mm_add_ps(vacc1x4567, vbias4567);
      vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
      vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
      vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
      vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
      vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
      vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
      vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
      vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
      _mm_storeu_ps(o0, vacc0x0123);
      _mm_storeu_ps(o0 + 4, vacc0x4567);
      o0 += 8;
      _mm_storeu_ps(o1, vacc1x0123);
      _mm_storeu_ps(o1 + 4, vacc1x4567);
      o1 += 8;
      w += 16;  // past this group's 8 scales + 8 biases
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const __m128 vscale0123 = _mm_load_ps(w);
      __m128 vacc0x0123 = _mm_loadu_ps(i0);
      i0 += 4;
      __m128 vacc1x0123 = _mm_loadu_ps(i1);
      i1 += 4;
      vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
      vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
      // Bias sits 8 floats after its scale within the packed group.
      const __m128 vbias0123 = _mm_load_ps(w + 8);
      vacc0x0123 = _mm_add_ps(vacc0x0123, vbias0123);
      vacc1x0123 = _mm_add_ps(vacc1x0123, vbias0123);
      vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
      vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
      vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
      vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
      _mm_storeu_ps(o0, vacc0x0123);
      o0 += 4;
      _mm_storeu_ps(o1, vacc1x0123);
      o1 += 4;
      w += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const __m128 vscale0123 = _mm_load_ps(w);
      __m128 vacc0x0123 = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      __m128 vacc1x0123 = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
      vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
      const __m128 vbias0123 = _mm_load_ps(w + 8);
      vacc0x0123 = _mm_add_ps(vacc0x0123, vbias0123);
      vacc1x0123 = _mm_add_ps(vacc1x0123, vbias0123);
      vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
      vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
      vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
      vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
      if (c & (2 * sizeof(float))) {
        _mm_storel_pi((__m64*) o0, vacc0x0123);
        _mm_storel_pi((__m64*) o1, vacc1x0123);
        // Shift the upper half down for the possible final scalar store.
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        _mm_store_ss(o0, vacc0x0123);
        _mm_store_ss(o1, vacc1x0123);
        o0 += 1;
        o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 5,154
| 29.502959
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-wasmrelaxedsimd-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using WASM
// relaxed SIMD min/max (lane results for NaN inputs are implementation-
// defined) with separate mul + add. Weights are packed as groups of 8 scales
// followed by the matching 8 biases. XNN_OOB_READS: partial channel tiles
// load full 4-float vectors and may read (never write) past the row end.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);
      const v128_t vscale4567 = wasm_v128_load(w + 4);
      v128_t vacc0x0123 = wasm_v128_load(i0);
      v128_t vacc0x4567 = wasm_v128_load(i0 + 4);
      i0 += 8;
      v128_t vacc1x0123 = wasm_v128_load(i1);
      v128_t vacc1x4567 = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vbias0123 = wasm_v128_load(w + 8);
      const v128_t vbias4567 = wasm_v128_load(w + 12);
      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc0x4567), vbias4567);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc1x4567), vbias4567);
      vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
      vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
      wasm_v128_store(o0, vacc0x0123);
      wasm_v128_store(o0 + 4, vacc0x4567);
      o0 += 8;
      wasm_v128_store(o1, vacc1x0123);
      wasm_v128_store(o1 + 4, vacc1x4567);
      o1 += 8;
      w += 16;  // past this group's 8 scales + 8 biases
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 += 4;
      v128_t vacc1 = wasm_v128_load(i1);
      i1 += 4;
      // Bias sits 8 floats after its scale within the packed group.
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);
      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);
      wasm_v128_store(o0, vacc0);
      o0 += 4;
      wasm_v128_store(o1, vacc1);
      o1 += 4;
      w += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);
      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);
      if (c & (2 * sizeof(float))) {
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);
        // Shift the upper half down for the possible final scalar store.
        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);
        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 5,228
| 32.305732
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-wasmrelaxedsimd-fma-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using WASM
// relaxed SIMD fused multiply-add (relaxed_madd) and relaxed min/max.
// Weights are packed as groups of 8 scales followed by the matching 8 biases.
// XNN_OOB_READS: partial channel tiles load full 4-float vectors and may
// read (never write) past the end of each row.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_fma_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);
      const v128_t vscale4567 = wasm_v128_load(w + 4);
      v128_t vacc0x0123 = wasm_v128_load(i0);
      v128_t vacc0x4567 = wasm_v128_load(i0 + 4);
      i0 += 8;
      v128_t vacc1x0123 = wasm_v128_load(i1);
      v128_t vacc1x4567 = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vbias0123 = wasm_v128_load(w + 8);
      const v128_t vbias4567 = wasm_v128_load(w + 12);
      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vscale0123, vacc0x0123, vbias0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vscale4567, vacc0x4567, vbias4567);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vscale0123, vacc1x0123, vbias0123);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(vscale4567, vacc1x4567, vbias4567);
      vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
      vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
      wasm_v128_store(o0, vacc0x0123);
      wasm_v128_store(o0 + 4, vacc0x4567);
      o0 += 8;
      wasm_v128_store(o1, vacc1x0123);
      wasm_v128_store(o1 + 4, vacc1x4567);
      o1 += 8;
      w += 16;  // past this group's 8 scales + 8 biases
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 += 4;
      v128_t vacc1 = wasm_v128_load(i1);
      i1 += 4;
      // Bias sits 8 floats after its scale within the packed group.
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc0, vbias);
      vacc1 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc1, vbias);
      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);
      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);
      wasm_v128_store(o0, vacc0);
      o0 += 4;
      wasm_v128_store(o1, vacc1);
      o1 += 4;
      w += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc0, vbias);
      vacc1 = __builtin_wasm_relaxed_madd_f32x4(vscale, vacc1, vbias);
      vacc0 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0);
      vacc1 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1);
      vacc0 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0);
      vacc1 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1);
      if (c & (2 * sizeof(float))) {
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);
        // Shift the upper half down for the possible final scalar store.
        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);
        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 5,256
| 32.484076
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-wasmsimd-arm-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using baseline
// WASM SIMD with IEEE-style wasm_f32x4_min/max (the "arm" clamp variant).
// Weights are packed as groups of 8 scales followed by the matching 8 biases.
// XNN_OOB_READS: partial channel tiles load full 4-float vectors and may
// read (never write) past the end of each row.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_arm_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);
      const v128_t vscale4567 = wasm_v128_load(w + 4);
      v128_t vacc0x0123 = wasm_v128_load(i0);
      v128_t vacc0x4567 = wasm_v128_load(i0 + 4);
      i0 += 8;
      v128_t vacc1x0123 = wasm_v128_load(i1);
      v128_t vacc1x4567 = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vbias0123 = wasm_v128_load(w + 8);
      const v128_t vbias4567 = wasm_v128_load(w + 12);
      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc0x4567), vbias4567);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc1x4567), vbias4567);
      vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
      vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
      vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
      vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
      vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
      vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
      vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
      vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
      wasm_v128_store(o0, vacc0x0123);
      wasm_v128_store(o0 + 4, vacc0x4567);
      o0 += 8;
      wasm_v128_store(o1, vacc1x0123);
      wasm_v128_store(o1 + 4, vacc1x4567);
      o1 += 8;
      w += 16;  // past this group's 8 scales + 8 biases
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 += 4;
      v128_t vacc1 = wasm_v128_load(i1);
      i1 += 4;
      // Bias sits 8 floats after its scale within the packed group.
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = wasm_f32x4_max(vmin, vacc0);
      vacc1 = wasm_f32x4_max(vmin, vacc1);
      vacc0 = wasm_f32x4_min(vmax, vacc0);
      vacc1 = wasm_f32x4_min(vmax, vacc1);
      wasm_v128_store(o0, vacc0);
      o0 += 4;
      wasm_v128_store(o1, vacc1);
      o1 += 4;
      w += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = wasm_f32x4_max(vmin, vacc0);
      vacc1 = wasm_f32x4_max(vmin, vacc1);
      vacc0 = wasm_f32x4_min(vmax, vacc0);
      vacc1 = wasm_f32x4_min(vmax, vacc1);
      if (c & (2 * sizeof(float))) {
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);
        // Shift the upper half down for the possible final scalar store.
        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);
        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 4,937
| 30.452229
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c8-minmax-wasmsimd-x86-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
// Computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// for 2 rows at a time, 8 channels per main-loop iteration, using baseline
// WASM SIMD with wasm_f32x4_pmin/pmax (the "x86" clamp variant: pseudo-
// min/max that lower to single x86 minps/maxps instructions).
// Weights are packed as groups of 8 scales followed by the matching 8 biases.
// XNN_OOB_READS: partial channel tiles load full 4-float vectors and may
// read (never write) past the end of each row.
void xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  // Strides are in bytes; after a row pair, rewind by the channels already consumed.
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Odd trailing row: alias row 1 onto row 0 so the duplicate work is harmless.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    const float* w = weights;
    size_t c = channels;
    // Main loop: 8 channels per iteration for both rows.
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);
      const v128_t vscale4567 = wasm_v128_load(w + 4);
      v128_t vacc0x0123 = wasm_v128_load(i0);
      v128_t vacc0x4567 = wasm_v128_load(i0 + 4);
      i0 += 8;
      v128_t vacc1x0123 = wasm_v128_load(i1);
      v128_t vacc1x4567 = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vbias0123 = wasm_v128_load(w + 8);
      const v128_t vbias4567 = wasm_v128_load(w + 12);
      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc0x0123), vbias0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc0x4567), vbias4567);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vscale0123, vacc1x0123), vbias0123);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vscale4567, vacc1x4567), vbias4567);
      vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
      vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
      vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
      vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
      vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
      vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
      vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
      vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
      wasm_v128_store(o0, vacc0x0123);
      wasm_v128_store(o0 + 4, vacc0x4567);
      o0 += 8;
      wasm_v128_store(o1, vacc1x0123);
      wasm_v128_store(o1 + 4, vacc1x4567);
      o1 += 8;
      w += 16;  // past this group's 8 scales + 8 biases
    }
    // Remainder: 4 full channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 += 4;
      v128_t vacc1 = wasm_v128_load(i1);
      i1 += 4;
      // Bias sits 8 floats after its scale within the packed group.
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = wasm_f32x4_pmax(vmin, vacc0);
      vacc1 = wasm_f32x4_pmax(vmin, vacc1);
      vacc0 = wasm_f32x4_pmin(vmax, vacc0);
      vacc1 = wasm_f32x4_pmin(vmax, vacc1);
      wasm_v128_store(o0, vacc0);
      o0 += 4;
      wasm_v128_store(o1, vacc1);
      o1 += 4;
      w += 4;
    }
    // Final 1-3 channels: vector loads overrun the row (allowed by XNN_OOB_READS);
    // stores are scalarized so nothing past the row end is written.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);
      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      const v128_t vbias = wasm_v128_load(w + 8);
      vacc0 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc0), vbias);
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vscale, vacc1), vbias);
      vacc0 = wasm_f32x4_pmax(vmin, vacc0);
      vacc1 = wasm_f32x4_pmax(vmin, vacc1);
      vacc0 = wasm_f32x4_pmin(vmax, vacc0);
      vacc1 = wasm_f32x4_pmin(vmax, vacc1);
      if (c & (2 * sizeof(float))) {
        wasm_v128_store64_lane(o0, vacc0, 0);
        wasm_v128_store64_lane(o1, vacc1, 0);
        // Shift the upper half down for the possible final scalar store.
        vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
        vacc1 = wasm_v64x2_shuffle(vacc1, vacc1, 1, 1);
        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        wasm_v128_store32_lane(o0, vacc0, 0);
        o0 += 1;
        wasm_v128_store32_lane(o1, vacc1, 0);
        o1 += 1;
      }
    }
    // Advance to the next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
| 4,953
| 30.55414
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// Sliding-window mask for the partial tail: loading 8 ints starting at
// &mask_table[7 - k] yields k lanes of all-ones followed by zeros (1 <= k <= 7).
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// AVX ReLU microkernel: output[i] = max(input[i], 0), 16 floats per main-loop
// iteration. batch is in bytes and must be a positive multiple of sizeof(float).
void xnn_f32_vrelu_ukernel__avx_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m256 vzero = _mm256_setzero_ps();
  // Main loop: 16 floats per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc01234567 = _mm256_loadu_ps(input);
    __m256 vacc89ABCDEF = _mm256_loadu_ps(input + 8);
    input += 16;
    vacc01234567 = _mm256_max_ps(vacc01234567, vzero);
    vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEF, vzero);
    _mm256_storeu_ps(output, vacc01234567);
    _mm256_storeu_ps(output + 8, vacc89ABCDEF);
    output += 16;
  }
  // Remainder: 8 floats at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input);
    input += 8;
    vacc = _mm256_max_ps(vacc, vzero);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Final 1-7 floats: masked load keeps the read in bounds; stores narrow
  // progressively (4, then 2, then 1 float) so nothing past the end is written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // batch bytes back from &mask_table[7] selects exactly batch/4 active lanes.
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input, vmask);
    vacc = _mm256_max_ps(vacc, vzero);
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);  // continue with the upper half
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,183
| 27
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// ReLU (max(x, 0)) over a contiguous float buffer, 8 elements per iteration
// using one 256-bit AVX register; 1-7 leftover floats use a masked load.
void xnn_f32_vrelu_ukernel__avx_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m256 vzero = _mm256_setzero_ps();

  while (batch >= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_max_ps(vx, vzero));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Sliding window into mask_table selects exactly batch/4 valid lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    __m256 vtail = _mm256_maskload_ps(input, vmask);
    vtail = _mm256_max_ps(vtail, vzero);

    // Emit the remaining 1-7 floats in 4/2/1-element chunks.
    __m128 vpart = _mm256_castps256_ps128(vtail);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vtail, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 1,798
| 26.257576
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 16 elements per iteration
// using one 512-bit ZMM register; the tail uses AVX-512 lane masking.
void xnn_f32_vrelu_ukernel__avx512f_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m512 vzero = _mm512_setzero_ps();

  while (batch >= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_max_ps(vx, vzero));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Convert the remaining byte count to an element count and build a
    // lane mask with the low `batch` bits set.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vtail = _mm512_maskz_loadu_ps(vmask, input);
    _mm512_mask_storeu_ps(output, vmask, _mm512_max_ps(vtail, vzero));
  }
}
| 1,516
| 27.622642
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 32 elements per main-loop
// iteration (two ZMM registers); tail handled via AVX-512 lane masking.
void xnn_f32_vrelu_ukernel__avx512f_x32(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m512 vzero = _mm512_setzero_ps();

  // Main loop: two 16-float registers per iteration.
  while (batch >= 32 * sizeof(float)) {
    const __m512 vlo = _mm512_loadu_ps(input);
    const __m512 vhi = _mm512_loadu_ps(input + 16);
    input += 32;

    _mm512_storeu_ps(output, _mm512_max_ps(vlo, vzero));
    _mm512_storeu_ps(output + 16, _mm512_max_ps(vhi, vzero));
    output += 32;
    batch -= 32 * sizeof(float);
  }
  // Secondary loop: one register per iteration.
  while (batch >= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_max_ps(vx, vzero));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Convert bytes to elements and build a mask with the low `batch` bits set.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vtail = _mm512_maskz_loadu_ps(vmask, input);
    _mm512_mask_storeu_ps(output, vmask, _mm512_max_ps(vtail, vzero));
  }
}
| 1,934
| 28.769231
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 4 elements per iteration
// with NEON. The tail intentionally over-reads a full quad (XNN_OOB_READS)
// and then stores only the valid 1-3 lanes.
void xnn_f32_vrelu_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float32x4_t vzero = vmovq_n_f32(0.0f);

  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    vst1q_f32(output, vmaxq_f32(vx, vzero)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-quad load past the end is permitted by XNN_OOB_READS.
    const float32x4_t vy = vmaxq_f32(vld1q_f32(input), vzero);

    float32x2_t vpart = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,316
| 24.326923
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 8 elements per main-loop
// iteration (two NEON quads). Tail over-reads one quad (XNN_OOB_READS) and
// stores only the valid 1-3 lanes.
void xnn_f32_vrelu_ukernel__neon_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float32x4_t vzero = vmovq_n_f32(0.0f);

  // Main loop: two quads per iteration.
  while (batch >= 8 * sizeof(float)) {
    const float32x4_t vlo = vld1q_f32(input); input += 4;
    const float32x4_t vhi = vld1q_f32(input); input += 4;
    vst1q_f32(output, vmaxq_f32(vlo, vzero)); output += 4;
    vst1q_f32(output, vmaxq_f32(vhi, vzero)); output += 4;
    batch -= 8 * sizeof(float);
  }
  // Secondary loop: one quad per iteration.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    vst1q_f32(output, vmaxq_f32(vx, vzero)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-quad load past the end is permitted by XNN_OOB_READS.
    const float32x4_t vy = vmaxq_f32(vld1q_f32(input), vzero);

    float32x2_t vpart = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,663
| 26.733333
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, one element at a time,
// operating on raw IEEE-754 bit patterns: `((w >> 31) - 1)` is all-ones when
// the sign bit is clear and zero when it is set, so ANDing clamps negatives
// (and -0.0f) to +0.0f without any float comparison.
void xnn_f32_vrelu_ukernel__scalar_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint32_t* in_bits = (const uint32_t*) input;
  uint32_t* out_bits = (uint32_t*) output;

  while (batch >= sizeof(uint32_t)) {
    uint32_t word = *in_bits++;
    word &= (word >> 31) - 1;  // zero the word iff the sign bit is set
    *out_bits++ = word;
    batch -= sizeof(uint32_t);
  }
}
| 903
| 24.111111
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, two elements per main-loop
// iteration, via the sign-bit trick on raw IEEE-754 words: `((w >> 31) - 1)`
// is all-ones for non-negative inputs and zero for negative ones.
void xnn_f32_vrelu_ukernel__scalar_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint32_t* in_bits = (const uint32_t*) input;
  uint32_t* out_bits = (uint32_t*) output;

  while (batch >= 2 * sizeof(uint32_t)) {
    uint32_t w0 = in_bits[0];
    uint32_t w1 = in_bits[1];
    in_bits += 2;

    w0 &= (w0 >> 31) - 1;
    w1 &= (w1 >> 31) - 1;

    out_bits[0] = w0;
    out_bits[1] = w1;
    out_bits += 2;
    batch -= 2 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(batch != 0) {
    // At most one element remains (batch is a multiple of sizeof(float)).
    uint32_t word = *in_bits;
    *out_bits = word & ((word >> 31) - 1);
  }
}
| 1,141
| 22.791667
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, four elements per main-loop
// iteration, via the sign-bit trick on raw IEEE-754 words: `((w >> 31) - 1)`
// is all-ones for non-negative inputs and zero for negative ones.
void xnn_f32_vrelu_ukernel__scalar_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint32_t* in_bits = (const uint32_t*) input;
  uint32_t* out_bits = (uint32_t*) output;

  while (batch >= 4 * sizeof(uint32_t)) {
    uint32_t w0 = in_bits[0];
    uint32_t w1 = in_bits[1];
    uint32_t w2 = in_bits[2];
    uint32_t w3 = in_bits[3];
    in_bits += 4;

    w0 &= (w0 >> 31) - 1;
    w1 &= (w1 >> 31) - 1;
    w2 &= (w2 >> 31) - 1;
    w3 &= (w3 >> 31) - 1;

    out_bits[0] = w0;
    out_bits[1] = w1;
    out_bits[2] = w2;
    out_bits[3] = w3;
    out_bits += 4;
    batch -= 4 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 leftover elements, handled one word at a time.
    do {
      uint32_t word = *in_bits++;
      word &= (word >> 31) - 1;
      *out_bits++ = word;
      batch -= sizeof(uint32_t);
    } while (batch != 0);
  }
}
| 1,391
| 23.421053
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, eight elements per
// main-loop iteration, via the sign-bit trick on raw IEEE-754 words:
// `((w >> 31) - 1)` is all-ones for non-negative inputs and zero otherwise.
void xnn_f32_vrelu_ukernel__scalar_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint32_t* in_bits = (const uint32_t*) input;
  uint32_t* out_bits = (uint32_t*) output;

  while (batch >= 8 * sizeof(uint32_t)) {
    uint32_t w0 = in_bits[0];
    uint32_t w1 = in_bits[1];
    uint32_t w2 = in_bits[2];
    uint32_t w3 = in_bits[3];
    uint32_t w4 = in_bits[4];
    uint32_t w5 = in_bits[5];
    uint32_t w6 = in_bits[6];
    uint32_t w7 = in_bits[7];
    in_bits += 8;

    w0 &= (w0 >> 31) - 1;
    w1 &= (w1 >> 31) - 1;
    w2 &= (w2 >> 31) - 1;
    w3 &= (w3 >> 31) - 1;
    w4 &= (w4 >> 31) - 1;
    w5 &= (w5 >> 31) - 1;
    w6 &= (w6 >> 31) - 1;
    w7 &= (w7 >> 31) - 1;

    out_bits[0] = w0;
    out_bits[1] = w1;
    out_bits[2] = w2;
    out_bits[3] = w3;
    out_bits[4] = w4;
    out_bits[5] = w5;
    out_bits[6] = w6;
    out_bits[7] = w7;
    out_bits += 8;
    batch -= 8 * sizeof(uint32_t);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-7 leftover elements, handled one word at a time.
    do {
      uint32_t word = *in_bits++;
      word &= (word >> 31) - 1;
      *out_bits++ = word;
      batch -= sizeof(uint32_t);
    } while (batch != 0);
  }
}
| 1,735
| 24.15942
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 4 elements per iteration
// with SSE. The tail loads a full XMM register past the end (XNN_OOB_READS)
// and stores only the valid 1-3 lanes.
void xnn_f32_vrelu_ukernel__sse_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128 vzero = _mm_setzero_ps();

  while (batch >= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_max_ps(vx, vzero));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-register load past the end is permitted by XNN_OOB_READS.
    __m128 vtail = _mm_max_ps(_mm_loadu_ps(input), vzero);

    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vtail);
      vtail = _mm_movehl_ps(vtail, vtail);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vtail);
    }
  }
}
| 1,286
| 22.833333
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 8 elements per main-loop
// iteration (two XMM registers) with SSE. Tail over-reads one register
// (XNN_OOB_READS) and stores only the valid 1-3 lanes.
void xnn_f32_vrelu_ukernel__sse_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128 vzero = _mm_setzero_ps();

  // Main loop: two XMM registers per iteration.
  while (batch >= 8 * sizeof(float)) {
    const __m128 vlo = _mm_loadu_ps(input);
    const __m128 vhi = _mm_loadu_ps(input + 4);
    input += 8;

    _mm_storeu_ps(output, _mm_max_ps(vlo, vzero));
    _mm_storeu_ps(output + 4, _mm_max_ps(vhi, vzero));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Secondary loop: one register per iteration.
  while (batch >= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_max_ps(vx, vzero));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-register load past the end is permitted by XNN_OOB_READS.
    __m128 vtail = _mm_max_ps(_mm_loadu_ps(input), vzero);

    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vtail);
      vtail = _mm_movehl_ps(vtail, vtail);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vtail);
    }
  }
}
| 1,632
| 23.742424
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, one element per iteration,
// using the WebAssembly scalar float-max builtin.
void xnn_f32_vrelu_ukernel__wasm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vzero = 0.0f;

  while (batch >= sizeof(float)) {
    const float vx = *input++;
    *output++ = __builtin_wasm_max_f32(vx, vzero);
    batch -= sizeof(float);
  }
}
| 854
| 22.75
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, two elements per main-loop
// iteration, using the WebAssembly scalar float-max builtin.
void xnn_f32_vrelu_ukernel__wasm_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vzero = 0.0f;

  while (batch >= 2 * sizeof(float)) {
    const float vx0 = input[0];
    const float vx1 = input[1];
    input += 2;

    output[0] = __builtin_wasm_max_f32(vx0, vzero);
    output[1] = __builtin_wasm_max_f32(vx1, vzero);
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // At most one element remains.
    *output = __builtin_wasm_max_f32(*input, vzero);
  }
}
| 1,131
| 22.583333
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, four elements per main-loop
// iteration, using the WebAssembly scalar float-max builtin.
void xnn_f32_vrelu_ukernel__wasm_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vzero = 0.0f;

  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input[0];
    const float vx1 = input[1];
    const float vx2 = input[2];
    const float vx3 = input[3];
    input += 4;

    output[0] = __builtin_wasm_max_f32(vx0, vzero);
    output[1] = __builtin_wasm_max_f32(vx1, vzero);
    output[2] = __builtin_wasm_max_f32(vx2, vzero);
    output[3] = __builtin_wasm_max_f32(vx3, vzero);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 leftover elements, handled one at a time.
    do {
      *output++ = __builtin_wasm_max_f32(*input++, vzero);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,408
| 23.719298
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
// ReLU (max(x, 0)) over a contiguous float buffer, eight elements per
// main-loop iteration, using the WebAssembly scalar float-max builtin.
void xnn_f32_vrelu_ukernel__wasm_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vzero = 0.0f;

  while (batch >= 8 * sizeof(float)) {
    const float vx0 = input[0];
    const float vx1 = input[1];
    const float vx2 = input[2];
    const float vx3 = input[3];
    const float vx4 = input[4];
    const float vx5 = input[5];
    const float vx6 = input[6];
    const float vx7 = input[7];
    input += 8;

    output[0] = __builtin_wasm_max_f32(vx0, vzero);
    output[1] = __builtin_wasm_max_f32(vx1, vzero);
    output[2] = __builtin_wasm_max_f32(vx2, vzero);
    output[3] = __builtin_wasm_max_f32(vx3, vzero);
    output[4] = __builtin_wasm_max_f32(vx4, vzero);
    output[5] = __builtin_wasm_max_f32(vx5, vzero);
    output[6] = __builtin_wasm_max_f32(vx6, vzero);
    output[7] = __builtin_wasm_max_f32(vx7, vzero);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-7 leftover elements, handled one at a time.
    do {
      *output++ = __builtin_wasm_max_f32(*input++, vzero);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,812
| 25.275362
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 16 elements per main-loop
// iteration with WebAssembly SIMD. Uses signed i32x4 max against 0 on the raw
// IEEE-754 bit patterns: negative floats have the sign bit set and compare
// below 0 as signed integers, so the integer max clamps them to +0.0f.
// Tail over-reads one full vector (XNN_OOB_READS).
void xnn_f32_vrelu_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);

  // Main loop: four 128-bit vectors per iteration.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;

    wasm_v128_store(output, wasm_i32x4_max(vx0, vzero));
    wasm_v128_store(output + 4, wasm_i32x4_max(vx1, vzero));
    wasm_v128_store(output + 8, wasm_i32x4_max(vx2, vzero));
    wasm_v128_store(output + 12, wasm_i32x4_max(vx3, vzero));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Secondary loop: one vector per iteration.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_i32x4_max(vx, vzero));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-vector load past the end is permitted by XNN_OOB_READS.
    v128_t vtail = wasm_i32x4_max(wasm_v128_load(input), vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vtail, 0);
      vtail = wasm_v64x2_shuffle(vtail, vtail, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vtail, 0);
    }
  }
}
| 1,999
| 26.39726
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 4 elements per iteration
// with WebAssembly SIMD. Signed i32x4 max against 0 on the raw IEEE-754 bit
// patterns clamps negative floats (sign bit set) to +0.0f. Tail over-reads
// one full vector (XNN_OOB_READS).
void xnn_f32_vrelu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);

  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_i32x4_max(vx, vzero));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-vector load past the end is permitted by XNN_OOB_READS.
    v128_t vtail = wasm_i32x4_max(wasm_v128_load(input), vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vtail, 0);
      vtail = wasm_v64x2_shuffle(vtail, vtail, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vtail, 0);
    }
  }
}
| 1,334
| 23.272727
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrelu/gen/f32-vrelu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrelu/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
// ReLU (max(x, 0)) over a contiguous float buffer, 8 elements per main-loop
// iteration with WebAssembly SIMD. Signed i32x4 max against 0 on the raw
// IEEE-754 bit patterns clamps negative floats (sign bit set) to +0.0f.
// Tail over-reads one full vector (XNN_OOB_READS).
void xnn_f32_vrelu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);

  // Main loop: two 128-bit vectors per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vlo = wasm_v128_load(input);
    const v128_t vhi = wasm_v128_load(input + 4);
    input += 8;

    wasm_v128_store(output, wasm_i32x4_max(vlo, vzero));
    wasm_v128_store(output + 4, wasm_i32x4_max(vhi, vzero));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Secondary loop: one vector per iteration.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_i32x4_max(vx, vzero));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Full-vector load past the end is permitted by XNN_OOB_READS.
    v128_t vtail = wasm_i32x4_max(wasm_v128_load(input), vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vtail, 0);
      vtail = wasm_v64x2_shuffle(vtail, vtail, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vtail, 0);
    }
  }
}
| 1,712
| 24.567164
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round-down (floor) over a contiguous float buffer, 16 elements per
// main-loop iteration using AVX _mm256_round_ps with the toward-negative-
// infinity rounding mode; the 1-7 element tail uses a masked load driven by
// the mask table stored in params.
void xnn_f32_vrndd_ukernel__avx_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: two YMM registers per iteration.
  while (batch >= 16 * sizeof(float)) {
    const __m256 vlo = _mm256_loadu_ps(input);
    const __m256 vhi = _mm256_loadu_ps(input + 8);
    input += 16;

    _mm256_storeu_ps(output, _mm256_round_ps(vlo, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
    _mm256_storeu_ps(output + 8, _mm256_round_ps(vhi, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Secondary loop: one YMM register per iteration.
  while (batch >= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_round_ps(vx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Sliding window into the params mask table selects exactly batch/4 lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));

    const __m256 vx = _mm256_maskload_ps(input, vmask);
    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);

    // Emit the remaining 1-7 floats in 4/2/1-element chunks.
    __m128 vpart = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 2,268
| 29.253333
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round-down (floor) over a contiguous float buffer, 8 elements per iteration
// using AVX _mm256_round_ps with the toward-negative-infinity rounding mode;
// the 1-7 element tail uses a masked load driven by the params mask table.
void xnn_f32_vrndd_ukernel__avx_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_round_ps(vx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Sliding window into the params mask table selects exactly batch/4 lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));

    const __m256 vx = _mm256_maskload_ps(input, vmask);
    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);

    // Emit the remaining 1-7 floats in 4/2/1-element chunks.
    __m128 vpart = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 1,785
| 27.349206
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round-down (floor) over a contiguous float buffer, 16 elements per
// iteration using AVX-512 _mm512_roundscale_ps with the toward-negative-
// infinity mode; the tail uses AVX-512 lane masking.
void xnn_f32_vrndd_ukernel__avx512f_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vx, _MM_FROUND_TO_NEG_INF));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Convert bytes to elements and build a mask with the low `batch` bits set.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vy = _mm512_maskz_roundscale_ps(vmask, vx, _MM_FROUND_TO_NEG_INF);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 1,576
| 29.326923
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Applies floor() to a batch of floats with AVX512F: 32 elements per
// main-loop iteration, then a 16-wide loop, then one masked tail iteration.
void xnn_f32_vrndd_ukernel__avx512f_x32(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 32 * sizeof(float)) {
    const __m512 vin_lo = _mm512_loadu_ps(input);
    const __m512 vin_hi = _mm512_loadu_ps(input + 16);
    input += 32;

    const __m512 vout_lo = _mm512_roundscale_ps(vin_lo, _MM_FROUND_TO_NEG_INF);
    const __m512 vout_hi = _mm512_roundscale_ps(vin_hi, _MM_FROUND_TO_NEG_INF);

    _mm512_storeu_ps(output, vout_lo);
    _mm512_storeu_ps(output + 16, vout_hi);
    output += 32;
    batch -= 32 * sizeof(float);
  }
  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_NEG_INF));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Convert the remaining byte count to an element count and build a lane
    // mask with one bit per remaining element.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vin = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vout = _mm512_maskz_roundscale_ps(vmask, vin, _MM_FROUND_TO_NEG_INF);
    _mm512_mask_storeu_ps(output, vmask, vout);
  }
}
| 2,062
| 31.234375
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndd-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Computes floor() of each element with ARM NEON, 4 floats per iteration.
// ARMv7 NEON has no round-toward-negative-infinity instruction, so each
// value is truncated via an int32 round-trip and then decremented in lanes
// where truncation rounded up (negative non-integers).
void xnn_f32_vrndd_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 is 2**23 as a float: inputs with |x| >= 2**23 are already
  // integral (and may not fit in int32), so those lanes pass through as-is.
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const uint32x4_t vone = vreinterpretq_u32_f32(vmovq_n_f32(1.0f));
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;

    // Truncate toward zero through int32.
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    // All-ones in lanes where |x| < 2**23, i.e. where the truncation is valid.
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    const float32x4_t vprerndx0123 = vcvtq_f32_s32(vintx0123);
    // Clear the sign bit of the mask so the bitwise select below always takes
    // the sign bit from the original input (preserves the sign of zero).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    // Select the truncated value where the mask is set, the input elsewhere.
    const float32x4_t vrndx0123 = vbslq_f32(vrndmask0123, vprerndx0123, vx0123);

    // Truncation rounded up exactly in lanes where rndx > x; subtract 1 there.
    const uint32x4_t vadjmask0123 = vcgtq_f32(vrndx0123, vx0123);
    const float32x4_t vadjrndx0123 = vreinterpretq_f32_u32(vandq_u32(vadjmask0123, vone));
    const float32x4_t vy0123 = vsubq_f32(vrndx0123, vadjrndx0123);

    vst1q_f32(output, vy0123); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // only the valid lanes are stored back.
    const float32x4_t vx = vld1q_f32(input);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vreinterpretq_f32_u32(vandq_u32(vadjmask, vone));
    const float32x4_t vy = vsubq_f32(vrndx, vadjrndx);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,454
| 32.630137
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndd-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Computes floor() of each element with ARM NEON, 8 floats per main-loop
// iteration (2x unrolled), then a 4-wide loop and a masked-store tail.
// ARMv7 NEON has no round-toward-negative-infinity instruction, so each
// value is truncated via an int32 round-trip and then decremented in lanes
// where truncation rounded up (negative non-integers).
void xnn_f32_vrndd_ukernel__neon_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 is 2**23 as a float: inputs with |x| >= 2**23 are already
  // integral (and may not fit in int32), so those lanes pass through as-is.
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const uint32x4_t vone = vreinterpretq_u32_f32(vmovq_n_f32(1.0f));
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;
    const float32x4_t vx4567 = vld1q_f32(input); input += 4;

    // Truncate toward zero through int32.
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    const int32x4_t vintx4567 = vcvtq_s32_f32(vx4567);

    // All-ones in lanes where |x| < 2**23, i.e. where truncation is valid.
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    uint32x4_t vrndmask4567 = vcaltq_f32(vx4567, vintegral_threshold);

    const float32x4_t vprerndx0123 = vcvtq_f32_s32(vintx0123);
    const float32x4_t vprerndx4567 = vcvtq_f32_s32(vintx4567);

    // Clear the mask's sign bit so the bitwise select keeps the input's sign
    // bit (preserves the sign of zero).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndmask4567 = vbicq_u32(vrndmask4567, vmovq_n_u32(UINT32_C(0x80000000)));

    // Select the truncated value where the mask is set, the input elsewhere.
    const float32x4_t vrndx0123 = vbslq_f32(vrndmask0123, vprerndx0123, vx0123);
    const float32x4_t vrndx4567 = vbslq_f32(vrndmask4567, vprerndx4567, vx4567);

    // Truncation rounded up exactly where rndx > x; subtract 1 in those lanes.
    const uint32x4_t vadjmask0123 = vcgtq_f32(vrndx0123, vx0123);
    const uint32x4_t vadjmask4567 = vcgtq_f32(vrndx4567, vx4567);
    const float32x4_t vadjrndx0123 = vreinterpretq_f32_u32(vandq_u32(vadjmask0123, vone));
    const float32x4_t vadjrndx4567 = vreinterpretq_f32_u32(vandq_u32(vadjmask4567, vone));
    const float32x4_t vy0123 = vsubq_f32(vrndx0123, vadjrndx0123);
    const float32x4_t vy4567 = vsubq_f32(vrndx4567, vadjrndx4567);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
  }
  // Remainder loop: one full vector of 4 at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vreinterpretq_f32_u32(vandq_u32(vadjmask, vone));
    const float32x4_t vy = vsubq_f32(vrndx, vadjrndx);
    vst1q_f32(output, vy); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // only the valid lanes are stored back.
    const float32x4_t vx = vld1q_f32(input);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vreinterpretq_f32_u32(vandq_u32(vadjmask, vone));
    const float32x4_t vy = vsubq_f32(vrndx, vadjrndx);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,793
| 38.936842
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-neonv8-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using the ARMv8 vrndm
// instruction, 4 elements per loop iteration.
void xnn_f32_vrndd_ukernel__neonv8_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    const float32x4_t vout = vrndmq_f32(vin);
    vst1q_f32(output, vout); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const float32x4_t vin = vld1q_f32(input);
    const float32x4_t vout = vrndmq_f32(vin);
    float32x2_t vout_lo = vget_low_f32(vout);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vout_lo); output += 2;
      vout_lo = vget_high_f32(vout);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vout_lo, 0);
    }
  }
}
| 1,305
| 25.12
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using the ARMv8 vrndm
// instruction: 8 elements per main-loop iteration (2x unrolled), then a
// 4-wide loop, then a partial-store tail.
void xnn_f32_vrndd_ukernel__neonv8_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const float32x4_t vin_lo = vld1q_f32(input); input += 4;
    const float32x4_t vin_hi = vld1q_f32(input); input += 4;

    const float32x4_t vout_lo = vrndmq_f32(vin_lo);
    const float32x4_t vout_hi = vrndmq_f32(vin_hi);

    vst1q_f32(output, vout_lo); output += 4;
    vst1q_f32(output, vout_hi); output += 4;
    batch -= 8 * sizeof(float);
  }
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    vst1q_f32(output, vrndmq_f32(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const float32x4_t vin = vld1q_f32(input);
    const float32x4_t vout = vrndmq_f32(vin);
    float32x2_t vout_lo = vget_low_f32(vout);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vout_lo); output += 2;
      vout_lo = vget_high_f32(vout);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vout_lo, 0);
    }
  }
}
| 1,672
| 27.844828
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-scalar-libm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar floor() kernel: applies floorf() to one element per iteration.
void xnn_f32_vrndd_ukernel__scalar_libm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // batch is asserted non-zero, so the loop body runs at least once.
  while (batch != 0) {
    *output++ = floorf(*input++);
    batch -= sizeof(float);
  }
}
| 838
| 22.305556
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-scalar-libm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar floor() kernel, 2x unrolled: two floorf() calls per main-loop
// iteration, plus an optional single-element tail.
void xnn_f32_vrndd_ukernel__scalar_libm_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 2 * sizeof(float)) {
    const float vin0 = input[0];
    const float vin1 = input[1];
    input += 2;

    output[0] = floorf(vin0);
    output[1] = floorf(vin1);
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Exactly one element remains.
    *output = floorf(*input);
  }
}
| 1,093
| 22.276596
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-scalar-libm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar floor() kernel, 4x unrolled: four floorf() calls per main-loop
// iteration, with a one-element-at-a-time tail loop.
void xnn_f32_vrndd_ukernel__scalar_libm_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const float vin0 = input[0];
    const float vin1 = input[1];
    const float vin2 = input[2];
    const float vin3 = input[3];
    input += 4;

    output[0] = floorf(vin0);
    output[1] = floorf(vin1);
    output[2] = floorf(vin2);
    output[3] = floorf(vin3);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail: 0-3 elements, processed one at a time.
  while (batch != 0) {
    *output++ = floorf(*input++);
    batch -= sizeof(float);
  }
}
| 1,344
| 23.017857
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndd-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Computes floor() of each element with SSE2, 4 floats per iteration.
// SSE2 has no direct floor instruction, so each value is truncated via an
// int32 round-trip and then decremented where truncation rounded up.
void xnn_f32_vrndd_ukernel__sse2_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // NOTE(review): vmagic is loaded from params->sse2.sign_mask — presumably
  // 0x80000000 per lane, which is both the float sign bit and the sentinel
  // _mm_cvttps_epi32 produces on overflow/NaN; confirm against the params
  // initialization.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    input += 4;

    // Truncate toward zero through int32 (produces vmagic on overflow).
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    // Mask selecting bits taken from the original input: always the sign bit,
    // and all bits in lanes where the int32 conversion overflowed.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vprerndx0123 = _mm_cvtepi32_ps(vintx0123);
    // Bitwise select: input bits where the mask is set, truncated bits elsewhere.
    const __m128 vrndx0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vprerndx0123));
    // Truncation rounded up exactly where rndx > x; subtract 1 in those lanes.
    const __m128 vy0123 = _mm_sub_ps(vrndx0123, _mm_and_ps(_mm_cmpgt_ps(vrndx0123, vx0123), vone));

    _mm_storeu_ps(output, vy0123);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // only the valid lanes are stored back.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    __m128 vy = _mm_sub_ps(vrndx, _mm_and_ps(_mm_cmpgt_ps(vrndx, vx), vone));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,146
| 31.530303
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndd-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Computes floor() of each element with SSE2: 8 floats per main-loop
// iteration (2x unrolled), then a 4-wide loop and a partial-store tail.
// SSE2 has no direct floor instruction, so each value is truncated via an
// int32 round-trip and then decremented where truncation rounded up.
void xnn_f32_vrndd_ukernel__sse2_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // NOTE(review): vmagic is loaded from params->sse2.sign_mask — presumably
  // 0x80000000 per lane, which is both the float sign bit and the sentinel
  // _mm_cvttps_epi32 produces on overflow/NaN; confirm against the params
  // initialization.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    const __m128 vx4567 = _mm_loadu_ps(input + 4);
    input += 8;

    // Truncate toward zero through int32 (produces vmagic on overflow).
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    const __m128i vintx4567 = _mm_cvttps_epi32(vx4567);

    // Mask selecting bits taken from the original input: always the sign bit,
    // and all bits in lanes where the int32 conversion overflowed.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagic)));

    const __m128 vprerndx0123 = _mm_cvtepi32_ps(vintx0123);
    const __m128 vprerndx4567 = _mm_cvtepi32_ps(vintx4567);

    // Bitwise select: input bits where the mask is set, truncated bits elsewhere.
    const __m128 vrndx0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vprerndx0123));
    const __m128 vrndx4567 = _mm_or_ps(_mm_and_ps(vx4567, vrndmask4567), _mm_andnot_ps(vrndmask4567, vprerndx4567));

    // Truncation rounded up exactly where rndx > x; subtract 1 in those lanes.
    const __m128 vy0123 = _mm_sub_ps(vrndx0123, _mm_and_ps(_mm_cmpgt_ps(vrndx0123, vx0123), vone));
    const __m128 vy4567 = _mm_sub_ps(vrndx4567, _mm_and_ps(_mm_cmpgt_ps(vrndx4567, vx4567), vone));

    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    output += 8;
  }
  // Remainder loop: one full vector of 4 at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;

    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vy = _mm_sub_ps(vrndx, _mm_and_ps(_mm_cmpgt_ps(vrndx, vx), vone));

    _mm_storeu_ps(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // only the valid lanes are stored back.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    __m128 vy = _mm_sub_ps(vrndx, _mm_and_ps(_mm_cmpgt_ps(vrndx, vx), vone));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,238
| 36.662791
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-sse41-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using the SSE4.1 roundps
// instruction, 4 elements per loop iteration.
void xnn_f32_vrndd_ukernel__sse41_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;

    const __m128 vout = _mm_round_ps(vin, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);

    _mm_storeu_ps(output, vout);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const __m128 vin = _mm_loadu_ps(input);
    __m128 vout = _mm_round_ps(vin, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vout);
      vout = _mm_movehl_ps(vout, vout);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vout);
    }
  }
}
| 1,355
| 25.076923
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using the SSE4.1 roundps
// instruction: 8 elements per main-loop iteration (2x unrolled), then a
// 4-wide loop, then a partial-store tail.
void xnn_f32_vrndd_ukernel__sse41_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const __m128 vin_lo = _mm_loadu_ps(input);
    const __m128 vin_hi = _mm_loadu_ps(input + 4);
    input += 8;

    const __m128 vout_lo = _mm_round_ps(vin_lo, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
    const __m128 vout_hi = _mm_round_ps(vin_hi, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);

    _mm_storeu_ps(output, vout_lo);
    _mm_storeu_ps(output + 4, vout_hi);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_round_ps(vin, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const __m128 vin = _mm_loadu_ps(input);
    __m128 vout = _mm_round_ps(vin, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vout);
      vout = _mm_movehl_ps(vout, vout);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vout);
    }
  }
}
| 1,799
| 27.125
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using WebAssembly SIMD's
// f32x4.floor, 4 elements per loop iteration.
void xnn_f32_vrndd_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input); input += 4;
    const v128_t vout = wasm_f32x4_floor(vin);
    wasm_v128_store(output, vout); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const v128_t vin = wasm_v128_load(input);
    v128_t vout = wasm_f32x4_floor(vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vout, 0);
      vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vout, 0);
    }
  }
}
| 1,312
| 24.745098
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndd-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward negative infinity using WebAssembly SIMD's
// f32x4.floor: 8 elements per main-loop iteration (2x unrolled), then a
// 4-wide loop, then a partial-store tail.
void xnn_f32_vrndd_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const v128_t vin_lo = wasm_v128_load(input); input += 4;
    const v128_t vin_hi = wasm_v128_load(input); input += 4;

    const v128_t vout_lo = wasm_f32x4_floor(vin_lo);
    const v128_t vout_hi = wasm_f32x4_floor(vin_hi);

    wasm_v128_store(output, vout_lo); output += 4;
    wasm_v128_store(output, vout_hi); output += 4;
    batch -= 8 * sizeof(float);
  }
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input); input += 4;
    wasm_v128_store(output, wasm_f32x4_floor(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 elements: a full-vector load is allowed (XNN_OOB_READS);
    // store only the valid lanes.
    const v128_t vin = wasm_v128_load(input);
    v128_t vout = wasm_f32x4_floor(vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vout, 0);
      vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vout, 0);
    }
  }
}
| 1,693
| 27.711864
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float to the nearest integer (ties to even) with AVX,
// 16 elements per main-loop iteration, then an 8-wide loop and a
// masked-load tail.
void xnn_f32_vrndne_ukernel__avx_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m256 vx01234567 = _mm256_loadu_ps(input);
    const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
    input += 16;

    const __m256 vy01234567 = _mm256_round_ps(vx01234567, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vy89ABCDEF = _mm256_round_ps(vx89ABCDEF, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    output += 16;
  }
  // Remainder loop: one full vector of 8 at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Offsetting back from mask_table[7] by `batch` bytes yields a mask with
    // one all-ones entry per remaining element — presumably the table holds
    // 7 set entries followed by zeros; confirm against the params definition.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    const __m256 vx = _mm256_maskload_ps(input, vmask);
    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Store the 1-7 valid lanes in progressively narrower pieces.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 2,285
| 29.48
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float to the nearest integer (ties to even) with AVX,
// 8 elements per loop iteration, with a masked-load tail.
void xnn_f32_vrndne_ukernel__avx_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);
    input += 8;

    const __m256 vout = _mm256_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(output, vout);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Load a per-lane mask covering the remaining 1-7 elements from the
    // parameter table, then do a masked load so no out-of-bounds read occurs.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    const __m256 vin = _mm256_maskload_ps(input, vmask);
    const __m256 vout = _mm256_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Store the valid lanes in progressively narrower pieces.
    __m128 vout_lo = _mm256_castps256_ps128(vout);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vout_lo);
      vout_lo = _mm256_extractf128_ps(vout, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vout_lo);
      vout_lo = _mm_movehl_ps(vout_lo, vout_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vout_lo);
    }
  }
}
| 1,794
| 27.492063
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float to the nearest integer (ties to even) with AVX512F,
// 16 elements per loop iteration; a masked iteration handles the tail.
void xnn_f32_vrndne_ukernel__avx512f_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vout = _mm512_roundscale_ps(vin, _MM_FROUND_TO_NEAREST_INT);

    _mm512_storeu_ps(output, vout);
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Convert the remaining byte count to an element count and build a lane
    // mask with one bit per remaining element.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vin = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vout = _mm512_maskz_roundscale_ps(vmask, vin, _MM_FROUND_TO_NEAREST_INT);
    _mm512_mask_storeu_ps(output, vmask, vout);
  }
}
| 1,585
| 29.5
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) using AVX-512
// roundscale; main loop handles 32 elements via two 16-wide registers.
// The params argument is unused.
void xnn_f32_vrndne_ukernel__avx512f_x32(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 32-wide iterations.
  while (batch >= 32 * sizeof(float)) {
    const __m512 vlo = _mm512_loadu_ps(input);
    const __m512 vhi = _mm512_loadu_ps(input + 16);
    input += 32;

    _mm512_storeu_ps(output, _mm512_roundscale_ps(vlo, _MM_FROUND_TO_NEAREST_INT));
    _mm512_storeu_ps(output + 16, _mm512_roundscale_ps(vhi, _MM_FROUND_TO_NEAREST_INT));
    output += 32;
    batch -= 32 * sizeof(float);
  }
  // Remaining full 16-wide groups.
  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_NEAREST_INT));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Build a per-lane mask with one bit for each remaining element.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vin = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vout = _mm512_maskz_roundscale_ps(vmask, vin, _MM_FROUND_TO_NEAREST_INT);
    _mm512_mask_storeu_ps(output, vmask, vout);
  }
}
| 2,079
| 31.5
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndne-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round to nearest integer, ties to even, on ARMv7 NEON (which has no vector
// round instruction): |x| + 2**23 - 2**23 forces the FPU to drop the
// fractional bits with round-to-nearest-even. The params argument is unused.
void xnn_f32_vrndne_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 == 2**23 as a float: every representable float >= 2**23 is
  // already an integer.
  const float32x4_t vmagic_number = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;

    const float32x4_t vabsx0123 = vabsq_f32(vx0123);
    // Lanes where |x| > 2**23 (vcalt compares absolute values): the magic-number
    // trick must not be applied there, so x is passed through unchanged.
    uint32x4_t vrndmask0123 = vcaltq_f32(vmagic_number, vx0123);

    // |x| + 2**23 - 2**23: rounds |x| to the nearest integer (ties to even).
    float32x4_t vrndabsx0123 = vaddq_f32(vabsx0123, vmagic_number);

    // Set bit 31 of the select mask so the sign bit always comes from x,
    // restoring the sign dropped by vabsq (and preserving -0).
    vrndmask0123 = vorrq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx0123 = vsubq_f32(vrndabsx0123, vmagic_number);

    // Bit-select: x where mask is set, rounded |x| elsewhere.
    const float32x4_t vy0123 = vbslq_f32(vrndmask0123, vx0123, vrndabsx0123);

    vst1q_f32(output, vy0123); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: a full 4-wide load is safe under XNN_OOB_READS;
    // only the valid lanes are stored below.
    const float32x4_t vx = vld1q_f32(input);

    const float32x4_t vabsx = vabsq_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vmagic_number, vx);

    float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);

    vrndmask = vorrq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx = vsubq_f32(vrndabsx, vmagic_number);

    const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);

    // Store 2, then 1 lanes depending on the remaining count.
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,061
| 30.723077
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndne-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round to nearest integer, ties to even, on ARMv7 NEON (no vector round
// instruction): |x| + 2**23 - 2**23 drops the fractional bits via the FPU's
// round-to-nearest-even mode. Main loop is unrolled 2x (8 floats).
// The params argument is unused.
void xnn_f32_vrndne_ukernel__neon_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 == 2**23 as a float: every representable float >= 2**23 is
  // already an integer.
  const float32x4_t vmagic_number = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;
    const float32x4_t vx4567 = vld1q_f32(input); input += 4;

    const float32x4_t vabsx0123 = vabsq_f32(vx0123);
    // Lanes where |x| > 2**23 (vcalt compares absolute values) must keep x
    // unchanged — the magic-number trick does not apply there.
    uint32x4_t vrndmask0123 = vcaltq_f32(vmagic_number, vx0123);
    const float32x4_t vabsx4567 = vabsq_f32(vx4567);
    uint32x4_t vrndmask4567 = vcaltq_f32(vmagic_number, vx4567);

    // |x| + 2**23 - 2**23: rounds |x| to nearest-even.
    float32x4_t vrndabsx0123 = vaddq_f32(vabsx0123, vmagic_number);
    float32x4_t vrndabsx4567 = vaddq_f32(vabsx4567, vmagic_number);

    // Set bit 31 of each select mask so the sign bit always comes from x,
    // restoring the sign dropped by vabsq (and preserving -0).
    vrndmask0123 = vorrq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)))
;
    vrndmask4567 = vorrq_u32(vrndmask4567, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx0123 = vsubq_f32(vrndabsx0123, vmagic_number);
    vrndabsx4567 = vsubq_f32(vrndabsx4567, vmagic_number);

    // Bit-select: x where mask set, rounded |x| elsewhere.
    const float32x4_t vy0123 = vbslq_f32(vrndmask0123, vx0123, vrndabsx0123);
    const float32x4_t vy4567 = vbslq_f32(vrndmask4567, vx4567, vrndabsx4567);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
  }
  // Remaining full 4-wide group, if any.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    const float32x4_t vabsx = vabsq_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vmagic_number, vx);

    float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);

    vrndmask = vorrq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx = vsubq_f32(vrndabsx, vmagic_number);

    const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);

    vst1q_f32(output, vy); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only the valid lanes are stored below.
    const float32x4_t vx = vld1q_f32(input);

    const float32x4_t vabsx = vabsq_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vmagic_number, vx);

    float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);

    vrndmask = vorrq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx = vsubq_f32(vrndabsx, vmagic_number);

    const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);

    // Store 2, then 1 lanes depending on the remaining count.
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,082
| 36.144578
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-neonv8-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the ARMv8
// VRINTN instruction, 4 elements per iteration. The params argument is unused.
void xnn_f32_vrndne_ukernel__neonv8_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    vst1q_f32(output, vrndnq_f32(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored.
    const float32x4_t vresult = vrndnq_f32(vld1q_f32(input));

    float32x2_t vpart = vget_low_f32(vresult);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vresult);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,306
| 25.14
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the ARMv8
// VRINTN instruction; main loop is unrolled to 8 elements.
// The params argument is unused.
void xnn_f32_vrndne_ukernel__neonv8_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 8-wide iterations.
  while (batch >= 8 * sizeof(float)) {
    const float32x4_t vlo = vld1q_f32(input); input += 4;
    const float32x4_t vhi = vld1q_f32(input); input += 4;

    vst1q_f32(output, vrndnq_f32(vlo)); output += 4;
    vst1q_f32(output, vrndnq_f32(vhi)); output += 4;
    batch -= 8 * sizeof(float);
  }
  // Remaining full 4-wide group.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    vst1q_f32(output, vrndnq_f32(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored.
    const float32x4_t vresult = vrndnq_f32(vld1q_f32(input));

    float32x2_t vpart = vget_low_f32(vresult);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vresult);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,673
| 27.862069
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-scalar-libm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even, current FP rounding
// mode) via libm nearbyintf, one element at a time. The params argument is
// unused.
void xnn_f32_vrndne_ukernel__scalar_libm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch != 0) {
    *output++ = nearbyintf(*input++);
    batch -= sizeof(float);
  }
}
| 843
| 22.444444
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-scalar-libm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer via libm nearbyintf, two elements
// per iteration plus a one-element tail. The params argument is unused.
void xnn_f32_vrndne_ukernel__scalar_libm_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 2 * sizeof(float)) {
    output[0] = nearbyintf(input[0]);
    output[1] = nearbyintf(input[1]);
    input += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Exactly one element left.
    *output = nearbyintf(*input);
  }
}
| 1,106
| 22.553191
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-scalar-libm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer via libm nearbyintf, four elements
// per iteration plus a scalar tail. The params argument is unused.
void xnn_f32_vrndne_ukernel__scalar_libm_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    output[0] = nearbyintf(input[0]);
    output[1] = nearbyintf(input[1]);
    output[2] = nearbyintf(input[2]);
    output[3] = nearbyintf(input[3]);
    input += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover elements.
  while (batch != 0) {
    *output++ = nearbyintf(*input++);
    batch -= sizeof(float);
  }
}
| 1,365
| 23.392857
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndne-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round to nearest integer (ties to even) with SSE2 only (no _mm_round_ps):
// _mm_cvtps_epi32 rounds to nearest-even by default, and converting back
// yields the rounded value. Out-of-int32-range (and NaN) lanes come back as
// the sentinel 0x80000000 and are passed through unchanged — such floats are
// already integral. The params argument supplies the per-lane sign mask.
void xnn_f32_vrndne_ukernel__sse2_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Per-lane 0x80000000: both the float sign-bit mask and the
  // _mm_cvtps_epi32 overflow sentinel (INT32_MIN) — presumably; the compare
  // below relies on sign_mask holding exactly that value.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    input += 4;

    // Round-to-nearest-even conversion; out-of-range lanes -> sentinel.
    const __m128i vintx0123 = _mm_cvtps_epi32(vx0123);

    // Mask = sign bit in every lane, plus all bits of overflowed lanes,
    // i.e. bits that must be taken from the original x.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));

    const __m128 vrndx0123 = _mm_cvtepi32_ps(vintx0123);

    // Blend: x's bits where the mask is set (sign / overflow), rounded value
    // elsewhere. Taking the sign from x preserves -0 and negative zeros.
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vrndx0123));

    _mm_storeu_ps(output, vy0123);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvtps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 1,890
| 29.5
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndne-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round to nearest integer (ties to even) with SSE2 only: _mm_cvtps_epi32
// rounds to nearest-even, and converting back yields the rounded value.
// Out-of-int32-range (and NaN) lanes come back as the sentinel 0x80000000 and
// are passed through unchanged — such floats are already integral. Main loop
// is unrolled 2x (8 floats). The params argument supplies the sign mask.
void xnn_f32_vrndne_ukernel__sse2_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Per-lane 0x80000000: both the float sign-bit mask and the
  // _mm_cvtps_epi32 overflow sentinel (INT32_MIN) — presumably; the compares
  // below rely on sign_mask holding exactly that value.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    const __m128 vx4567 = _mm_loadu_ps(input + 4);
    input += 8;

    // Round-to-nearest-even conversion; out-of-range lanes -> sentinel.
    const __m128i vintx0123 = _mm_cvtps_epi32(vx0123);
    const __m128i vintx4567 = _mm_cvtps_epi32(vx4567);

    // Mask = sign bit in every lane, plus all bits of overflowed lanes,
    // i.e. bits that must be taken from the original x.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagic)));

    const __m128 vrndx0123 = _mm_cvtepi32_ps(vintx0123);
    const __m128 vrndx4567 = _mm_cvtepi32_ps(vintx4567);

    // Blend: x's bits where the mask is set, rounded value elsewhere.
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vrndx0123));
    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(vx4567, vrndmask4567), _mm_andnot_ps(vrndmask4567, vrndx4567));

    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    output += 8;
  }
  // Remaining full 4-wide group, if any.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;

    const __m128i vintx = _mm_cvtps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    const __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));

    _mm_storeu_ps(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvtps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,778
| 33.7375
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-sse41-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the SSE4.1
// ROUNDPS instruction, 4 elements per iteration. The params argument is
// unused.
void xnn_f32_vrndne_ukernel__sse41_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;

    _mm_storeu_ps(output, _mm_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored.
    const __m128 vin = _mm_loadu_ps(input);
    __m128 vout = _mm_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vout);
      vout = _mm_movehl_ps(vout, vout);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vout);
    }
  }
}
| 1,364
| 25.25
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the SSE4.1
// ROUNDPS instruction; main loop is unrolled to 8 elements. The params
// argument is unused.
void xnn_f32_vrndne_ukernel__sse41_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 8-wide iterations.
  while (batch >= 8 * sizeof(float)) {
    const __m128 vlo = _mm_loadu_ps(input);
    const __m128 vhi = _mm_loadu_ps(input + 4);
    input += 8;

    _mm_storeu_ps(output, _mm_round_ps(vlo, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    _mm_storeu_ps(output + 4, _mm_round_ps(vhi, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Remaining full 4-wide group.
  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored.
    const __m128 vin = _mm_loadu_ps(input);
    __m128 vout = _mm_round_ps(vin, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vout);
      vout = _mm_movehl_ps(vout, vout);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vout);
    }
  }
}
| 1,816
| 27.390625
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the WAsm SIMD
// f32x4.nearest instruction, 4 elements per iteration. The params argument
// is unused.
void xnn_f32_vrndne_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input); input += 4;
    wasm_v128_store(output, wasm_f32x4_nearest(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored via lane stores.
    const v128_t vin = wasm_v128_load(input);
    v128_t vout = wasm_f32x4_nearest(vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vout, 0);
      vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vout, 0);
    }
  }
}
| 1,317
| 24.843137
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndne-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float to the nearest integer (ties to even) with the WAsm SIMD
// f32x4.nearest instruction; main loop is unrolled to 8 elements. The params
// argument is unused.
void xnn_f32_vrndne_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 8-wide iterations.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vlo = wasm_v128_load(input); input += 4;
    const v128_t vhi = wasm_v128_load(input); input += 4;

    wasm_v128_store(output, wasm_f32x4_nearest(vlo)); output += 4;
    wasm_v128_store(output, wasm_f32x4_nearest(vhi)); output += 4;
    batch -= 8 * sizeof(float);
  }
  // Remaining full 4-wide group.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input); input += 4;
    wasm_v128_store(output, wasm_f32x4_nearest(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only valid lanes are stored via lane stores.
    const v128_t vin = wasm_v128_load(input);
    v128_t vout = wasm_f32x4_nearest(vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vout, 0);
      vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vout, 0);
    }
  }
}
| 1,702
| 27.864407
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float up (toward +inf) with the AVX ROUNDPS instruction; main
// loop handles 16 elements via two 8-wide registers. The params argument
// supplies the mask table used for the partial-vector tail load.
void xnn_f32_vrndu_ukernel__avx_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 16-wide iterations.
  while (batch >= 16 * sizeof(float)) {
    const __m256 vlo = _mm256_loadu_ps(input);
    const __m256 vhi = _mm256_loadu_ps(input + 8);
    input += 16;

    _mm256_storeu_ps(output, _mm256_round_ps(vlo, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    _mm256_storeu_ps(output + 8, _mm256_round_ps(vhi, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Remaining full 8-wide group.
  while (batch >= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Masked load of the 1-7 remaining floats (table indexed by tail size).
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));

    const __m256 vin = _mm256_maskload_ps(input, vmask);
    const __m256 vout = _mm256_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);

    // Store 4, 2, then 1 lanes depending on the remaining count.
    __m128 vpart = _mm256_castps256_ps128(vout);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vout, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 2,268
| 29.253333
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float up (toward +inf) with the AVX ROUNDPS instruction, 8
// elements per iteration. The params argument supplies the mask table used
// for the partial-vector tail load.
void xnn_f32_vrndu_ukernel__avx_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);
    input += 8;

    _mm256_storeu_ps(output, _mm256_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Masked load of the 1-7 remaining floats (table indexed by tail size).
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));

    const __m256 vin = _mm256_maskload_ps(input, vmask);
    const __m256 vout = _mm256_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);

    // Store 4, 2, then 1 lanes depending on the remaining count.
    __m128 vpart = _mm256_castps256_ps128(vout);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vout, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 1,785
| 27.349206
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float up (toward +inf) using AVX-512 roundscale, 16 elements per
// iteration. The params argument is unused.
void xnn_f32_vrndu_ukernel__avx512f_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;

    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_POS_INF));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Build a per-lane mask with one bit for each remaining element.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vin = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vout = _mm512_maskz_roundscale_ps(vmask, vin, _MM_FROUND_TO_POS_INF);
    _mm512_mask_storeu_ps(output, vmask, vout);
  }
}
| 1,576
| 29.326923
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float up (toward +inf) using AVX-512 roundscale; main loop
// handles 32 elements via two 16-wide registers. The params argument is
// unused.
void xnn_f32_vrndu_ukernel__avx512f_x32(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled 32-wide iterations.
  while (batch >= 32 * sizeof(float)) {
    const __m512 vlo = _mm512_loadu_ps(input);
    const __m512 vhi = _mm512_loadu_ps(input + 16);
    input += 32;

    _mm512_storeu_ps(output, _mm512_roundscale_ps(vlo, _MM_FROUND_TO_POS_INF));
    _mm512_storeu_ps(output + 16, _mm512_roundscale_ps(vhi, _MM_FROUND_TO_POS_INF));
    output += 32;
    batch -= 32 * sizeof(float);
  }
  // Remaining full 16-wide groups.
  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_POS_INF));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Build a per-lane mask with one bit for each remaining element.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vin = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vout = _mm512_maskz_roundscale_ps(vmask, vin, _MM_FROUND_TO_POS_INF);
    _mm512_mask_storeu_ps(output, vmask, vout);
  }
}
| 2,062
| 31.234375
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndu-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Round each float up (toward +inf) on ARMv7 NEON (no vector round
// instruction): truncate toward zero via int32 conversion, then add 1 where
// the truncated value fell below x. Lanes with |x| >= 2**23 (or NaN) are
// already integral and are passed through. The params argument is unused.
void xnn_f32_vrndu_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 2**23: above this every representable float is an integer (and the
  // int32 conversion below could overflow).
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const float32x4_t vone = vmovq_n_f32(1.0f);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;

    // Truncate toward zero.
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    // Lanes with |x| < 2**23: safe to use the truncated result.
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    const float32x4_t vprerndx0123 = vcvtq_f32_s32(vintx0123);

    // Clear bit 31 of the select mask so the sign bit always comes from x
    // (preserves -0 and the sign of negatives truncating to zero).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));

    // Truncated value where in range, original x otherwise.
    const float32x4_t vrndx0123 = vbslq_f32(vrndmask0123, vprerndx0123, vx0123);

    // If truncation already gave a value >= x, no +1 adjustment is needed.
    uint32x4_t vadjmask0123 = vcgeq_f32(vrndx0123, vx0123);
    const float32x4_t vadjrndx0123 = vaddq_f32(vrndx0123, vone);
    // Force the sign bit to come from the unadjusted value, keeping -0 for
    // negative inputs that round up to zero.
    vadjmask0123 = vorrq_u32(vadjmask0123, vmovq_n_u32(UINT32_C(0x80000000)));

    const float32x4_t vy0123 = vbslq_f32(vadjmask0123, vrndx0123, vadjrndx0123);

    vst1q_f32(output, vy0123); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full 4-wide load is safe under XNN_OOB_READS;
    // only the valid lanes are stored below.
    const float32x4_t vx = vld1q_f32(input);

    const int32x4_t vintx = vcvtq_s32_f32(vx);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);

    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));

    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);

    uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
    vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));

    const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);

    // Store 2, then 1 lanes depending on the remaining count.
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,543
| 32.473684
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndu-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward +infinity (ceil) using pre-ARMv8 NEON, 8 elements
// per main-loop iteration. Same emulation as the x4 variant: truncate through
// an int32 round-trip (valid only for |x| < 2**23), keep x unchanged where it
// is already integral or non-finite, then add 1 where truncation decreased
// the value.
void xnn_f32_vrndu_ukernel__neon_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 == 2**23: all floats at or above this magnitude are integral.
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const float32x4_t vone = vmovq_n_f32(1.0f);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;
    const float32x4_t vx4567 = vld1q_f32(input); input += 4;

    // Truncate toward zero via int32 (result is masked out for large |x|).
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    const int32x4_t vintx4567 = vcvtq_s32_f32(vx4567);

    // Lanes with |x| < 2**23 may use the int round-trip result.
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    uint32x4_t vrndmask4567 = vcaltq_f32(vx4567, vintegral_threshold);

    const float32x4_t vprerndx0123 = vcvtq_f32_s32(vintx0123);
    const float32x4_t vprerndx4567 = vcvtq_f32_s32(vintx4567);

    // Clear the mask's sign bit so the bit-select keeps x's sign (preserves -0.0f).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndmask4567 = vbicq_u32(vrndmask4567, vmovq_n_u32(UINT32_C(0x80000000)));

    const float32x4_t vrndx0123 = vbslq_f32(vrndmask0123, vprerndx0123, vx0123);
    const float32x4_t vrndx4567 = vbslq_f32(vrndmask4567, vprerndx4567, vx4567);

    // Where rndx < x the truncation rounded down; those lanes need +1.
    uint32x4_t vadjmask0123 = vcgeq_f32(vrndx0123, vx0123);
    uint32x4_t vadjmask4567 = vcgeq_f32(vrndx4567, vx4567);

    const float32x4_t vadjrndx0123 = vaddq_f32(vrndx0123, vone);
    const float32x4_t vadjrndx4567 = vaddq_f32(vrndx4567, vone);

    // Set the mask's sign bit so the select keeps rndx's sign bit.
    vadjmask0123 = vorrq_u32(vadjmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    vadjmask4567 = vorrq_u32(vadjmask4567, vmovq_n_u32(UINT32_C(0x80000000)));

    const float32x4_t vy0123 = vbslq_f32(vadjmask0123, vrndx0123, vadjrndx0123);
    const float32x4_t vy4567 = vbslq_f32(vadjmask4567, vrndx4567, vadjrndx4567);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
  }
  // Same algorithm, one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
    vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);
    vst1q_f32(output, vy); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
    // only the valid lanes.
    const float32x4_t vx = vld1q_f32(input);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
    vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,992
| 38.93
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-neonv8-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil using the ARMv8 round-toward-+infinity instruction
// (vrndpq_f32), 4 elements per iteration.
void xnn_f32_vrndu_ukernel__neonv8_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one full 4-lane vector per iteration.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input);
    input += 4;
    vst1q_f32(output, vrndpq_f32(vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (permitted by XNN_OOB_READS)
  // and store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t vresult = vrndpq_f32(vld1q_f32(input));
    float32x2_t vpart = vget_low_f32(vresult);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart);
      output += 2;
      vpart = vget_high_f32(vresult);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,305
| 25.12
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil using the ARMv8 round-toward-+infinity instruction
// (vrndpq_f32), 8 elements per main-loop iteration.
void xnn_f32_vrndu_ukernel__neonv8_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled main loop: two 4-lane vectors per iteration.
  while (batch >= 8 * sizeof(float)) {
    const float32x4_t vin_lo = vld1q_f32(input);
    input += 4;
    const float32x4_t vin_hi = vld1q_f32(input);
    input += 4;
    vst1q_f32(output, vrndpq_f32(vin_lo));
    output += 4;
    vst1q_f32(output, vrndpq_f32(vin_hi));
    output += 4;
    batch -= 8 * sizeof(float);
  }
  // Remainder loop: one full vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input);
    input += 4;
    vst1q_f32(output, vrndpq_f32(vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t vresult = vrndpq_f32(vld1q_f32(input));
    float32x2_t vpart = vget_low_f32(vresult);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart);
      output += 2;
      vpart = vget_high_f32(vresult);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,672
| 27.844828
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-scalar-libm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar f32 ceil: applies libm ceilf() to one element per iteration.
void xnn_f32_vrndu_ukernel__scalar_libm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // `batch` is a byte count and is a nonzero multiple of sizeof(float),
  // so this loop always executes at least once.
  while (batch != 0) {
    *output++ = ceilf(*input++);
    batch -= sizeof(float);
  }
}
| 837
| 22.277778
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-scalar-libm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar f32 ceil: applies libm ceilf(), two elements per main-loop iteration.
void xnn_f32_vrndu_ukernel__scalar_libm_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled by 2.
  while (batch >= 2 * sizeof(float)) {
    const float va = input[0];
    const float vb = input[1];
    input += 2;
    output[0] = ceilf(va);
    output[1] = ceilf(vb);
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    *output = ceilf(*input);
  }
}
| 1,090
| 22.212766
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-scalar-libm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar f32 ceil: applies libm ceilf(), four elements per main-loop iteration.
void xnn_f32_vrndu_ukernel__scalar_libm_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled by 4.
  while (batch >= 4 * sizeof(float)) {
    const float va = input[0];
    const float vb = input[1];
    const float vc = input[2];
    const float vd = input[3];
    input += 4;
    output[0] = ceilf(va);
    output[1] = ceilf(vb);
    output[2] = ceilf(vc);
    output[3] = ceilf(vd);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 elements, handled one at a time.
  if XNN_UNLIKELY(batch != 0) {
    while (batch != 0) {
      *output++ = ceilf(*input++);
      batch -= sizeof(float);
    }
  }
}
| 1,339
| 22.928571
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndu-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward +infinity (ceil) using SSE2 only (no ROUNDPS).
// Emulation: truncate through CVTTPS2DQ, fall back to x itself where the
// conversion is invalid (already-integral or non-finite inputs), then add 1
// where the truncation decreased the value.
void xnn_f32_vrndu_ukernel__sse2_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // NOTE(review): params->sse2.sign_mask is presumably 0x80000000 in every
  // 32-bit lane (the float sign bit) -- confirm against the params initializer.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    input += 4;

    // CVTTPS2DQ truncates toward zero; out-of-range/NaN lanes produce the
    // "integer indefinite" value 0x80000000.
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    // Mask: all-ones where the conversion produced 0x80000000 (invalid),
    // plus the sign bit set in every lane.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vprerndx0123 = _mm_cvtepi32_ps(vintx0123);
    // rndx = x where the conversion was invalid, otherwise trunc(x) combined
    // with x's sign bit (preserves -0.0f).
    const __m128 vrndx0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vprerndx0123));
    // Lanes where rndx < x were rounded downward and need the +1 adjustment;
    // OR-ing in the sign bit keeps rndx's sign bit through the blend below.
    const __m128 vadjmask0123 = _mm_or_ps(_mm_cmpge_ps(vrndx0123, vx0123), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx0123 = _mm_add_ps(vrndx0123, vone);
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vrndx0123, vadjmask0123), _mm_andnot_ps(vadjmask0123, vadjrndx0123));

    _mm_storeu_ps(output, vy0123);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
    // only the valid lanes.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vadjmask = _mm_or_ps(_mm_cmpge_ps(vrndx, vx), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx = _mm_add_ps(vrndx, vone);
    __m128 vy = _mm_or_ps(_mm_and_ps(vrndx, vadjmask), _mm_andnot_ps(vadjmask, vadjrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,484
| 33.513889
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndu-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward +infinity (ceil) using SSE2 only, 8 elements per
// main-loop iteration. Same emulation as the x4 variant: truncate through
// CVTTPS2DQ, fall back to x where the conversion is invalid, then add 1 where
// truncation decreased the value.
void xnn_f32_vrndu_ukernel__sse2_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // NOTE(review): params->sse2.sign_mask is presumably 0x80000000 per lane
  // (the float sign bit) -- confirm against the params initializer.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    const __m128 vx4567 = _mm_loadu_ps(input + 4);
    input += 8;

    // CVTTPS2DQ truncates toward zero; invalid lanes become 0x80000000.
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    const __m128i vintx4567 = _mm_cvttps_epi32(vx4567);
    // All-ones where the conversion was invalid, plus the sign bit everywhere.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagic)));
    const __m128 vprerndx0123 = _mm_cvtepi32_ps(vintx0123);
    const __m128 vprerndx4567 = _mm_cvtepi32_ps(vintx4567);
    // rndx = x where invalid, else trunc(x) with x's sign bit (keeps -0.0f).
    const __m128 vrndx0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vprerndx0123));
    const __m128 vrndx4567 = _mm_or_ps(_mm_and_ps(vx4567, vrndmask4567), _mm_andnot_ps(vrndmask4567, vprerndx4567));
    // Lanes where rndx < x need the +1 adjustment; the OR with the sign bit
    // keeps rndx's sign through the blend.
    const __m128 vadjmask0123 = _mm_or_ps(_mm_cmpge_ps(vrndx0123, vx0123), _mm_castsi128_ps(vmagic));
    const __m128 vadjmask4567 = _mm_or_ps(_mm_cmpge_ps(vrndx4567, vx4567), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx0123 = _mm_add_ps(vrndx0123, vone);
    const __m128 vadjrndx4567 = _mm_add_ps(vrndx4567, vone);
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vrndx0123, vadjmask0123), _mm_andnot_ps(vadjmask0123, vadjrndx0123));
    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(vrndx4567, vadjmask4567), _mm_andnot_ps(vadjmask4567, vadjrndx4567));

    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    output += 8;
  }
  // Same algorithm, one vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vadjmask = _mm_or_ps(_mm_cmpge_ps(vrndx, vx), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx = _mm_add_ps(vrndx, vone);
    const __m128 vy = _mm_or_ps(_mm_and_ps(vrndx, vadjmask), _mm_andnot_ps(vadjmask, vadjrndx));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: full-vector over-read (XNN_OOB_READS), partial store.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vadjmask = _mm_or_ps(_mm_cmpge_ps(vrndx, vx), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx = _mm_add_ps(vrndx, vone);
    __m128 vy = _mm_or_ps(_mm_and_ps(vrndx, vadjmask), _mm_andnot_ps(vadjmask, vadjrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,912
| 39.760417
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-sse41-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil via the SSE4.1 ROUNDPS instruction with the
// round-toward-+infinity mode, 4 elements per iteration.
void xnn_f32_vrndu_ukernel__sse41_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one full 4-lane vector per iteration.
  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    __m128 vresult = _mm_round_ps(_mm_loadu_ps(input), _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vresult);
      vresult = _mm_movehl_ps(vresult, vresult);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vresult);
    }
  }
}
| 1,355
| 25.076923
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil via the SSE4.1 ROUNDPS instruction with the
// round-toward-+infinity mode, 8 elements per main-loop iteration.
void xnn_f32_vrndu_ukernel__sse41_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled main loop: two 4-lane vectors per iteration.
  while (batch >= 8 * sizeof(float)) {
    const __m128 vin_lo = _mm_loadu_ps(input);
    const __m128 vin_hi = _mm_loadu_ps(input + 4);
    input += 8;
    _mm_storeu_ps(output, _mm_round_ps(vin_lo, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    _mm_storeu_ps(output + 4, _mm_round_ps(vin_hi, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Remainder loop: one full vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;
    _mm_storeu_ps(output, _mm_round_ps(vin, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    __m128 vresult = _mm_round_ps(_mm_loadu_ps(input), _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vresult);
      vresult = _mm_movehl_ps(vresult, vresult);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vresult);
    }
  }
}
| 1,799
| 27.125
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil using the WAsm SIMD f32x4.ceil instruction,
// 4 elements per iteration.
void xnn_f32_vrndu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one full 4-lane vector per iteration.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_f32x4_ceil(vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    v128_t vtail = wasm_f32x4_ceil(wasm_v128_load(input));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vtail, 0);
      vtail = wasm_v64x2_shuffle(vtail, vtail, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vtail, 0);
    }
  }
}
| 1,310
| 24.705882
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 ceil using the WAsm SIMD f32x4.ceil instruction,
// 8 elements per main-loop iteration.
void xnn_f32_vrndu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled main loop: two 4-lane vectors per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vin_lo = wasm_v128_load(input);
    input += 4;
    const v128_t vin_hi = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_f32x4_ceil(vin_lo));
    output += 4;
    wasm_v128_store(output, wasm_f32x4_ceil(vin_hi));
    output += 4;
    batch -= 8 * sizeof(float);
  }
  // Remainder loop: one full vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_f32x4_ceil(vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    v128_t vtail = wasm_f32x4_ceil(wasm_v128_load(input));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vtail, 0);
      vtail = wasm_v64x2_shuffle(vtail, vtail, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vtail, 0);
    }
  }
}
| 1,689
| 27.644068
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 truncation (round toward zero) via AVX ROUNDPS,
// 16 elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__avx_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled main loop: two 8-lane vectors per iteration.
  while (batch >= 16 * sizeof(float)) {
    const __m256 vin_lo = _mm256_loadu_ps(input);
    const __m256 vin_hi = _mm256_loadu_ps(input + 8);
    input += 16;
    _mm256_storeu_ps(output, _mm256_round_ps(vin_lo, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    _mm256_storeu_ps(output + 8, _mm256_round_ps(vin_hi, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Remainder loop: one full 8-lane vector at a time.
  while (batch >= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_round_ps(vin, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail of 1-7 floats: masked load of exactly the remaining elements,
  // then progressively narrower stores.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    const __m256 vtail = _mm256_round_ps(_mm256_maskload_ps(input, vmask), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    __m128 vpart = _mm256_castps256_ps128(vtail);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vtail, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 2,256
| 29.093333
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 truncation (round toward zero) via AVX ROUNDPS,
// 8 elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__avx_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one full 8-lane vector per iteration.
  while (batch >= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);
    input += 8;
    _mm256_storeu_ps(output, _mm256_round_ps(vin, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail of 1-7 floats: masked load of exactly the remaining elements,
  // then progressively narrower stores.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    const __m256 vtail = _mm256_round_ps(_mm256_maskload_ps(input, vmask), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    __m128 vpart = _mm256_castps256_ps128(vtail);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vtail, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
  }
}
| 1,779
| 27.253968
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 truncation (round toward zero) via AVX-512 VRNDSCALEPS,
// 16 elements per iteration.
void xnn_f32_vrndz_ukernel__avx512f_x16(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one full 16-lane vector per iteration.
  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_ZERO));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Tail of 1-15 floats: build a lane mask with one bit per remaining element
  // and use masked load/round/store so no out-of-bounds access occurs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vtailmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vtail = _mm512_maskz_loadu_ps(vtailmask, input);
    _mm512_mask_storeu_ps(output, vtailmask, _mm512_maskz_roundscale_ps(vtailmask, vtail, _MM_FROUND_TO_ZERO));
  }
}
| 1,570
| 29.211538
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Vectorized f32 truncation (round toward zero) via AVX-512 VRNDSCALEPS,
// 32 elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__avx512f_x32(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Unrolled main loop: two 16-lane vectors per iteration.
  while (batch >= 32 * sizeof(float)) {
    const __m512 vin_lo = _mm512_loadu_ps(input);
    const __m512 vin_hi = _mm512_loadu_ps(input + 16);
    input += 32;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin_lo, _MM_FROUND_TO_ZERO));
    _mm512_storeu_ps(output + 16, _mm512_roundscale_ps(vin_hi, _MM_FROUND_TO_ZERO));
    output += 32;
    batch -= 32 * sizeof(float);
  }
  // Remainder loop: one full 16-lane vector at a time.
  while (batch >= 16 * sizeof(float)) {
    const __m512 vin = _mm512_loadu_ps(input);
    input += 16;
    _mm512_storeu_ps(output, _mm512_roundscale_ps(vin, _MM_FROUND_TO_ZERO));
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Tail of 1-15 floats: build a lane mask with one bit per remaining element
  // and use masked load/round/store so no out-of-bounds access occurs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vtailmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vtail = _mm512_maskz_loadu_ps(vtailmask, input);
    _mm512_mask_storeu_ps(output, vtailmask, _mm512_maskz_roundscale_ps(vtailmask, vtail, _MM_FROUND_TO_ZERO));
  }
}
| 2,050
| 31.046875
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndz-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward zero (truncation) using pre-ARMv8 NEON.
// FRINTZ is unavailable, so trunc is emulated via a float->int32->float
// round-trip, which is only valid for |x| < 2**23; lanes at or above that
// magnitude (already integral) or non-finite pass through unchanged.
void xnn_f32_vrndz_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 == 2**23: all floats at or above this magnitude are integral.
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;
    // Truncate toward zero; the result is masked out for large |x|.
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    // Absolute compare: lanes with |x| < 2**23 take the round-trip result.
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    const float32x4_t vrndx0123 = vcvtq_f32_s32(vintx0123);
    // Clear the mask's sign bit so the bit-select keeps x's sign (preserves -0.0f).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy0123 = vbslq_f32(vrndmask0123, vrndx0123, vx0123);
    vst1q_f32(output, vy0123); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-3 floats: over-read a full vector (XNN_OOB_READS), then store
    // only the valid lanes.
    const float32x4_t vx = vld1q_f32(input);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vrndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vrndmask, vrndx, vx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 1,949
| 29.952381
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndz-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Element-wise round-toward-zero (truncation, i.e. truncf()) of a buffer of
// floats using baseline ARM NEON, unrolled to 8 elements per main-loop
// iteration, with a 4-wide tail loop and a masked 1-3 element remainder.
void xnn_f32_vrndz_ukernel__neon_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0x4B000000 == 2**23 as a float: |x| >= 2**23 is already integral and must
  // bypass the int32 round-trip (which would overflow for |x| >= 2**31).
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)))
;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(input); input += 4;
    const float32x4_t vx4567 = vld1q_f32(input); input += 4;

    // Truncating float->int32 conversion.
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    const int32x4_t vintx4567 = vcvtq_s32_f32(vx4567);

    // |x| < 2**23: lanes where the int32 round-trip is valid (false for NaN).
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    uint32x4_t vrndmask4567 = vcaltq_f32(vx4567, vintegral_threshold);

    const float32x4_t vrndx0123 = vcvtq_f32_s32(vintx0123);
    const float32x4_t vrndx4567 = vcvtq_f32_s32(vintx4567);

    // Clear the mask's sign bit so the select always keeps x's sign bit
    // (preserves -0.0f for x in (-1.0, -0.0]).
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)))
;
    vrndmask4567 = vbicq_u32(vrndmask4567, vmovq_n_u32(UINT32_C(0x80000000)))
;

    // Bit-select: truncated value where masked, original x elsewhere.
    const float32x4_t vy0123 = vbslq_f32(vrndmask0123, vrndx0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vrndmask4567, vrndx4567, vx4567);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
  }
  // 4-element tail loop (same algorithm, single vector).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vrndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vrndmask, vrndx, vx);
    vst1q_f32(output, vy); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 elements: full-width load may read out of bounds
    // (permitted by XNN_OOB_READS); only valid lanes are stored.
    const float32x4_t vx = vld1q_f32(input);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vrndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vrndmask, vrndx, vx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,854
| 35.139241
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-neonv8-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward zero (truncation) with the ARMv8 VRND instruction
// via vrndq_f32; processes 4 elements per loop iteration.
void xnn_f32_vrndz_ukernel__neonv8_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one 128-bit vector (4 floats) per iteration.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    const float32x4_t vtrunc = vrndq_f32(vin);
    vst1q_f32(output, vtrunc); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 trailing elements: load a full vector (out-of-bounds read permitted
    // by XNN_OOB_READS), truncate, and store only the valid lanes.
    const float32x4_t vtrunc = vrndq_f32(vld1q_f32(input));
    float32x2_t vpart = vget_low_f32(vtrunc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vtrunc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,303
| 25.08
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward zero (truncation) with the ARMv8 VRND instruction
// via vrndq_f32, unrolled to 8 elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__neonv8_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: two 4-float vectors (8 elements) per iteration.
  while (batch >= 8 * sizeof(float)) {
    const float32x4_t vin_lo = vld1q_f32(input); input += 4;
    const float32x4_t vin_hi = vld1q_f32(input); input += 4;
    vst1q_f32(output, vrndq_f32(vin_lo)); output += 4;
    vst1q_f32(output, vrndq_f32(vin_hi)); output += 4;
    batch -= 8 * sizeof(float);
  }
  // 4-element tail loop.
  while (batch >= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    vst1q_f32(output, vrndq_f32(vin)); output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 trailing elements: full-width load (out-of-bounds read permitted by
    // XNN_OOB_READS), truncate, store only the valid lanes.
    const float32x4_t vtrunc = vrndq_f32(vld1q_f32(input));
    float32x2_t vpart = vget_low_f32(vtrunc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpart); output += 2;
      vpart = vget_high_f32(vtrunc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpart, 0);
    }
  }
}
| 1,668
| 27.775862
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-scalar-libm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar round-toward-zero kernel: applies truncf() to each float,
// one element per iteration.
void xnn_f32_vrndz_ukernel__scalar_libm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // batch != 0 is asserted above, so the loop body always runs at least once.
  while (batch != 0) {
    const float vin = *input++;
    *output++ = truncf(vin);
    batch -= sizeof(float);
  }
}
| 838
| 22.305556
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-scalar-libm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar round-toward-zero kernel: applies truncf() to each float,
// two elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__scalar_libm_x2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 2 * sizeof(float)) {
    // Read both inputs before writing any output (matches the generated
    // load-all-then-store-all pattern).
    const float va = input[0];
    const float vb = input[1];
    input += 2;
    output[0] = truncf(va);
    output[1] = truncf(vb);
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // Exactly one element remains (batch is a multiple of sizeof(float)).
    *output = truncf(*input);
  }
}
| 1,093
| 22.276596
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-scalar-libm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/scalar-libm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Scalar round-toward-zero kernel: applies truncf() to each float,
// four elements per main-loop iteration, scalar remainder loop.
void xnn_f32_vrndz_ukernel__scalar_libm_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  while (batch >= 4 * sizeof(float)) {
    // Read all four inputs before writing any output (matches the generated
    // load-all-then-store-all pattern).
    const float va = input[0];
    const float vb = input[1];
    const float vc = input[2];
    const float vd = input[3];
    input += 4;
    output[0] = truncf(va);
    output[1] = truncf(vb);
    output[2] = truncf(vc);
    output[3] = truncf(vd);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 trailing elements, handled one at a time.
    do {
      const float vin = *input++;
      *output++ = truncf(vin);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,344
| 23.017857
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-sse2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndz-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Element-wise round-toward-zero (truncation) of a buffer of floats using
// SSE2 only (no ROUNDPS available), 4 elements per main-loop iteration.
void xnn_f32_vrndz_ukernel__sse2_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Per the sign_mask field name, each lane holds 0x80000000. That value
  // doubles as: the float sign-bit mask, and the integer CVTTPS2DQ produces
  // on overflow/NaN (INT32_MIN).
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    input += 4;

    // Truncating float->int32 conversion; lanes with |x| >= 2**31 or NaN
    // produce INT32_MIN (== vmagic).
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);

    // Mask of bits to keep from the ORIGINAL x:
    //  - every lane keeps its sign bit (preserves -0.0f for x in (-1.0, -0.0]),
    //  - lanes where the conversion overflowed become all-ones, so the whole
    //    original value (already integral, or NaN) passes through unchanged.
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));

    const __m128 vrndx0123 = _mm_cvtepi32_ps(vintx0123);

    // Blend: bits of x where the mask is set, truncated value elsewhere.
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vrndx0123));

    _mm_storeu_ps(output, vy0123);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 elements. The full-width load may read past the end of
    // the buffer (permitted by XNN_OOB_READS); only valid lanes are stored.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 1,890
| 29.5
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/vrndz-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Element-wise round-toward-zero (truncation) of a buffer of floats using
// SSE2 only (no ROUNDPS available), unrolled to 8 elements per main-loop
// iteration, with a 4-wide tail loop and a masked 1-3 element remainder.
void xnn_f32_vrndz_ukernel__sse2_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Per the sign_mask field name, each lane holds 0x80000000: both the float
  // sign-bit mask and the CVTTPS2DQ overflow/NaN result (INT32_MIN).
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(input);
    const __m128 vx4567 = _mm_loadu_ps(input + 4);
    input += 8;

    // Truncating float->int32; lanes with |x| >= 2**31 or NaN yield INT32_MIN.
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    const __m128i vintx4567 = _mm_cvttps_epi32(vx4567);

    // Mask of bits to keep from the ORIGINAL x: sign bit in every lane
    // (preserves -0.0f), all-ones in overflowed lanes (pass x through).
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagic)));

    const __m128 vrndx0123 = _mm_cvtepi32_ps(vintx0123);
    const __m128 vrndx4567 = _mm_cvtepi32_ps(vintx4567);

    // Blend: bits of x where masked, truncated value elsewhere.
    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vrndx0123));
    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(vx4567, vrndmask4567), _mm_andnot_ps(vrndmask4567, vrndx4567));

    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    output += 8;
  }
  // 4-element tail loop (same algorithm, single vector).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    input += 4;
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    const __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 elements: full-width load may read out of bounds
    // (permitted by XNN_OOB_READS); only valid lanes are stored.
    const __m128 vx = _mm_loadu_ps(input);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    __m128 vy = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,780
| 33.7625
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-sse41-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Rounds each float toward zero (truncation) with the SSE4.1 ROUNDPS
// instruction; processes 4 elements per loop iteration.
void xnn_f32_vrndz_ukernel__sse41_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: one 128-bit vector (4 floats) per iteration.
  while (batch >= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;
    const __m128 vtrunc = _mm_round_ps(vin, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    _mm_storeu_ps(output, vtrunc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 trailing elements: load a full vector (out-of-bounds read permitted
    // by XNN_OOB_READS), truncate, and store only the valid lanes.
    __m128 vtrunc = _mm_round_ps(_mm_loadu_ps(input), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vtrunc);
      vtrunc = _mm_movehl_ps(vtrunc, vtrunc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vtrunc);
    }
  }
}
| 1,349
| 24.961538
| 87
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.