repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-minmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_minmax_ukernel__wasmsimd_arm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds and the scalar minuend once, before any loop.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // Main loop: two 4-float vectors (8 elements) per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input_a);
    const v128_t vx_hi = wasm_v128_load(input_a + 4);
    input_a += 8;

    // Reverse subtraction (b - a), then clamp into [min, max].
    v128_t vacc_lo = wasm_f32x4_sub(vminuend, vx_lo);
    v128_t vacc_hi = wasm_f32x4_sub(vminuend, vx_hi);
    vacc_lo = wasm_f32x4_max(vacc_lo, vmin);
    vacc_hi = wasm_f32x4_max(vacc_hi, vmin);
    vacc_lo = wasm_f32x4_min(vacc_lo, vmax);
    vacc_hi = wasm_f32x4_min(vacc_hi, vmax);

    wasm_v128_store(output, vacc_lo);
    wasm_v128_store(output + 4, vacc_hi);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // One full vector at a time for the 4..7-element leftover.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_max(vacc, vmin);
    vacc = wasm_f32x4_min(vacc, vmax);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: over-read a full vector (allowed via XNN_OOB_READS),
  // then store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_max(vacc, vmin);
    vacc = wasm_f32x4_min(vacc, vmax);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,281
| 26.166667
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-minmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_minmax_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Hoist the clamp bounds and the broadcast scalar minuend out of the loops.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // Main loop: four 4-float vectors (16 elements) per iteration.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input_a);
    const v128_t vx1 = wasm_v128_load(input_a + 4);
    const v128_t vx2 = wasm_v128_load(input_a + 8);
    const v128_t vx3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    v128_t vacc0 = wasm_f32x4_sub(vminuend, vx0);
    v128_t vacc1 = wasm_f32x4_sub(vminuend, vx1);
    v128_t vacc2 = wasm_f32x4_sub(vminuend, vx2);
    v128_t vacc3 = wasm_f32x4_sub(vminuend, vx3);

    // pmax/pmin keep the bound in the first operand; the operand order is
    // significant for NaN propagation and is preserved from the template.
    vacc0 = wasm_f32x4_pmax(vmin, vacc0);
    vacc1 = wasm_f32x4_pmax(vmin, vacc1);
    vacc2 = wasm_f32x4_pmax(vmin, vacc2);
    vacc3 = wasm_f32x4_pmax(vmin, vacc3);
    vacc0 = wasm_f32x4_pmin(vmax, vacc0);
    vacc1 = wasm_f32x4_pmin(vmax, vacc1);
    vacc2 = wasm_f32x4_pmin(vmax, vacc2);
    vacc3 = wasm_f32x4_pmin(vmax, vacc3);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Single-vector leftover loop.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: full-vector over-read, partial-lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,740
| 28.159574
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_minmax_ukernel__wasmsimd_x86_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Load-once invariants: clamp bounds and the broadcast scalar minuend.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // One 4-float vector per iteration: acc = clamp(b - a, min, max).
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    // pmax/pmin operand order (bound first) preserved from the template.
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: over-read one vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,742
| 25.815385
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-minmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_minmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Load the clamp bounds and broadcast the scalar minuend once up front.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // Main loop: two 4-float vectors (8 elements) per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input_a);
    const v128_t vx_hi = wasm_v128_load(input_a + 4);
    input_a += 8;

    v128_t vacc_lo = wasm_f32x4_sub(vminuend, vx_lo);
    v128_t vacc_hi = wasm_f32x4_sub(vminuend, vx_hi);
    // pmax/pmin operand order (bound first) preserved from the template.
    vacc_lo = wasm_f32x4_pmax(vmin, vacc_lo);
    vacc_hi = wasm_f32x4_pmax(vmin, vacc_hi);
    vacc_lo = wasm_f32x4_pmin(vmax, vacc_lo);
    vacc_hi = wasm_f32x4_pmin(vmax, vacc_hi);

    wasm_v128_store(output, vacc_lo);
    wasm_v128_store(output + 4, vacc_hi);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Single-vector leftover loop.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: full-vector over-read, partial-lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_f32x4_pmax(vmin, vacc);
    vacc = wasm_f32x4_pmin(vmax, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,289
| 26.261905
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Reverse-subtract-with-constant + ReLU: out[i] = max(b - a[i], 0).
  const float vscalar = *input_b;
  do {
    const float vx = *input_a++;
    *output++ = math_max_f32(vscalar - vx, 0.0f);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 951
| 23.410256
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), two elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 2 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    input_a += 2;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain after the 2-wide loop.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = math_max_f32(vscalar - *input_a, 0.0f);
  }
}
| 1,314
| 22.482143
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), unrolled four elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    input_a += 4;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    float vacc2 = vscalar - vx2;
    float vacc3 = vscalar - vx3;
    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input_a++;
      *output++ = math_max_f32(vscalar - vx, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,603
| 23.30303
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), unrolled eight elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 8 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    const float vx4 = input_a[4];
    const float vx5 = input_a[5];
    const float vx6 = input_a[6];
    const float vx7 = input_a[7];
    input_a += 8;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    float vacc2 = vscalar - vx2;
    float vacc3 = vscalar - vx3;
    float vacc4 = vscalar - vx4;
    float vacc5 = vscalar - vx5;
    float vacc6 = vscalar - vx6;
    float vacc7 = vscalar - vx7;
    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);
    vacc4 = math_max_f32(vacc4, 0.0f);
    vacc5 = math_max_f32(vacc5, 0.0f);
    vacc6 = math_max_f32(vacc6, 0.0f);
    vacc7 = math_max_f32(vacc7, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // 1-7 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input_a++;
      *output++ = math_max_f32(vscalar - vx, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,099
| 24.609756
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), using the wasm f32.max builtin for the ReLU.
  const float vscalar = *input_b;
  do {
    const float vx = *input_a++;
    *output++ = __builtin_wasm_max_f32(vscalar - vx, 0.0f);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 959
| 23.615385
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), two elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 2 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    input_a += 2;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain after the 2-wide loop.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = __builtin_wasm_max_f32(vscalar - *input_a, 0.0f);
  }
}
| 1,342
| 22.982143
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), unrolled four elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    input_a += 4;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    float vacc2 = vscalar - vx2;
    float vacc3 = vscalar - vx3;
    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input_a++;
      *output++ = __builtin_wasm_max_f32(vscalar - vx, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,651
| 24.030303
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = max(b - a[i], 0), unrolled eight elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 8 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    const float vx4 = input_a[4];
    const float vx5 = input_a[5];
    const float vx6 = input_a[6];
    const float vx7 = input_a[7];
    input_a += 8;

    float vacc0 = vscalar - vx0;
    float vacc1 = vscalar - vx1;
    float vacc2 = vscalar - vx2;
    float vacc3 = vscalar - vx3;
    float vacc4 = vscalar - vx4;
    float vacc5 = vscalar - vx5;
    float vacc6 = vscalar - vx6;
    float vacc7 = vscalar - vx7;
    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
    vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
    vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
    vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
    vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // 1-7 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input_a++;
      *output++ = __builtin_wasm_max_f32(vscalar - vx, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,187
| 25.682927
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // ReLU via signed-integer max against 0: a float with the sign bit set
  // compares below +0 as an i32, so negatives are replaced with +0.
  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // Main loop: four 4-float vectors (16 elements) per iteration.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input_a);
    const v128_t vx1 = wasm_v128_load(input_a + 4);
    const v128_t vx2 = wasm_v128_load(input_a + 8);
    const v128_t vx3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    v128_t vacc0 = wasm_f32x4_sub(vminuend, vx0);
    v128_t vacc1 = wasm_f32x4_sub(vminuend, vx1);
    v128_t vacc2 = wasm_f32x4_sub(vminuend, vx2);
    v128_t vacc3 = wasm_f32x4_sub(vminuend, vx3);
    vacc0 = wasm_i32x4_max(vacc0, vzero);
    vacc1 = wasm_i32x4_max(vacc1, vzero);
    vacc2 = wasm_i32x4_max(vacc2, vzero);
    vacc3 = wasm_i32x4_max(vacc3, vzero);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
    batch -= 16 * sizeof(float);
  }
  // Single-vector leftover loop.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: full-vector over-read, partial-lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,323
| 26.023256
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // ReLU via signed-integer max against 0 (negative floats sort below +0
  // when reinterpreted as i32).
  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // One 4-float vector per iteration: acc = relu(b - a).
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: over-read one vector (XNN_OOB_READS) and store
  // only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,534
| 23.758065
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_relu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // ReLU via signed-integer max against 0 (negative floats sort below +0
  // when reinterpreted as i32).
  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vminuend = wasm_v128_load32_splat(input_b);

  // Main loop: two 4-float vectors (8 elements) per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input_a);
    const v128_t vx_hi = wasm_v128_load(input_a + 4);
    input_a += 8;

    v128_t vacc_lo = wasm_f32x4_sub(vminuend, vx_lo);
    v128_t vacc_hi = wasm_f32x4_sub(vminuend, vx_hi);
    vacc_lo = wasm_i32x4_max(vacc_lo, vzero);
    vacc_hi = wasm_i32x4_max(vacc_hi, vzero);

    wasm_v128_store(output, vacc_lo);
    wasm_v128_store(output + 4, vacc_hi);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Single-vector leftover loop.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 trailing elements: full-vector over-read, partial-lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t vx = wasm_v128_load(input_a);
    v128_t vacc = wasm_f32x4_sub(vminuend, vx);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,976
| 24.346154
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Reverse subtraction with a broadcast scalar: out[i] = b - a[i].
  const float vscalar = *input_b;
  do {
    *output++ = vscalar - *input_a++;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 912
| 23.026316
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = b - a[i], two elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 2 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    input_a += 2;

    output[0] = vscalar - vx0;
    output[1] = vscalar - vx1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain after the 2-wide loop.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = vscalar - *input_a;
  }
}
| 1,197
| 21.603774
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = b - a[i], unrolled four elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    input_a += 4;

    output[0] = vscalar - vx0;
    output[1] = vscalar - vx1;
    output[2] = vscalar - vx2;
    output[3] = vscalar - vx3;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = vscalar - *input_a++;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,406
| 22.065574
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vrsubc_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // out[i] = b - a[i], unrolled eight elements per iteration.
  const float vscalar = *input_b;
  while (batch >= 8 * sizeof(float)) {
    const float vx0 = input_a[0];
    const float vx1 = input_a[1];
    const float vx2 = input_a[2];
    const float vx3 = input_a[3];
    const float vx4 = input_a[4];
    const float vx5 = input_a[5];
    const float vx6 = input_a[6];
    const float vx7 = input_a[7];
    input_a += 8;

    output[0] = vscalar - vx0;
    output[1] = vscalar - vx1;
    output[2] = vscalar - vx2;
    output[3] = vscalar - vx3;
    output[4] = vscalar - vx4;
    output[5] = vscalar - vx5;
    output[6] = vscalar - vx6;
    output[7] = vscalar - vx7;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // 1-7 leftover elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = vscalar - *input_a++;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,746
| 22.931507
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Reverse subtraction with a broadcast scalar: output[i] = input_b[0] - input_a[i].
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vrsubc_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all four f32 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: 16 floats (four v128 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    v128_t vy0 = wasm_f32x4_sub(vb, va0);
    v128_t vy1 = wasm_f32x4_sub(vb, va1);
    v128_t vy2 = wasm_f32x4_sub(vb, va2);
    v128_t vy3 = wasm_f32x4_sub(vb, va3);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(vb, va);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector load may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(vb, va);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,047
| 24.924051
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Reverse subtraction with a broadcast scalar: output[i] = input_b[0] - input_a[i].
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vrsubc_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all four f32 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(vb, va);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector load may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(vb, va);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,410
| 22.915254
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vrsubc-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Reverse subtraction with a broadcast scalar: output[i] = input_b[0] - input_a[i].
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vrsubc_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all four f32 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: 8 floats (two v128 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    v128_t vy0 = wasm_f32x4_sub(vb, va0);
    v128_t vy1 = wasm_f32x4_sub(vb, va1);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(vb, va);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector load may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(vb, va);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,776
| 23.342466
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, AVX,
// 16 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// The tail uses maskload/partial stores, so no out-of-bounds access occurs.
// Fix: "&params" had been mojibake-corrupted to "¶ms" (HTML-entity garbling
// of "&para"), which does not compile; restored the address-of expression.
void xnn_f32_vsqrdiff_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 16 floats (two __m256 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_sub_ps(vacc0, _mm256_loadu_ps(input_b));
    vacc1 = _mm256_sub_ps(vacc1, _mm256_loadu_ps(input_b + 8));
    input_b += 16;
    vacc0 = _mm256_mul_ps(vacc0, vacc0);
    vacc1 = _mm256_mul_ps(vacc1, vacc1);
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // Secondary loop: one vector (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Tail: 1-7 floats; masked load keeps reads in bounds, stores are piecewise.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,406
| 26.988372
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, AVX,
// 8 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// The tail uses maskload/partial stores, so no out-of-bounds access occurs.
// Fix: "&params" had been mojibake-corrupted to "¶ms" (HTML-entity garbling
// of "&para"), which does not compile; restored the address-of expression.
void xnn_f32_vsqrdiff_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one __m256 vector (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Tail: 1-7 floats; masked load keeps reads in bounds, stores are piecewise.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 1,894
| 26.463768
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, AVX512F,
// 16 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel. The tail uses masked loads/stores, so no
// out-of-bounds access occurs.
void xnn_f32_vsqrdiff_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one __m512 vector (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    vacc = _mm512_mul_ps(vacc, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  // Tail: 1-15 floats handled with a lane mask.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    vacc = _mm512_maskz_mul_ps(vmask, vacc, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,657
| 27.586207
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, AVX512F,
// 32 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel. The tail uses masked loads/stores, so no
// out-of-bounds access occurs.
void xnn_f32_vsqrdiff_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 32 floats (two __m512 vectors) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_sub_ps(vacc0, _mm512_loadu_ps(input_b));
    vacc1 = _mm512_sub_ps(vacc1, _mm512_loadu_ps(input_b + 16));
    input_b += 32;
    vacc0 = _mm512_mul_ps(vacc0, vacc0);
    vacc1 = _mm512_mul_ps(vacc1, vacc1);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one vector (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    vacc = _mm512_mul_ps(vacc, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  // Tail: 1-15 floats handled with a lane mask.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    vacc = _mm512_maskz_mul_ps(vmask, vacc, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 2,171
| 27.96
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, NEON,
// 4 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__neon_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one float32x4 vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    vst1q_f32(output, vacc); output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the low pair, then continue with the high pair.
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,528
| 25.362069
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, NEON,
// 8 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 8 floats (two float32x4 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t va0 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4;
    const float32x4_t va1 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc0 = vsubq_f32(va0, vb0);
    float32x4_t vacc1 = vsubq_f32(va1, vb1);
    vacc0 = vmulq_f32(vacc0, vacc0);
    vacc1 = vmulq_f32(vacc1, vacc1);
    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    vst1q_f32(output, vacc); output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the low pair, then continue with the high pair.
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 2,101
| 27.405405
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, scalar,
// one element per iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
void xnn_f32_vsqrdiff_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // batch is asserted nonzero, so a do-while is safe here.
  do {
    const float vdiff = *input_a++ - *input_b++;
    *output++ = vdiff * vdiff;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 941
| 23.153846
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, scalar,
// 2 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
void xnn_f32_vsqrdiff_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: 2 elements per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float vdiff0 = input_a[0] - input_b[0];
    const float vdiff1 = input_a[1] - input_b[1];
    input_a += 2;
    input_b += 2;
    output[0] = vdiff0 * vdiff0;
    output[1] = vdiff1 * vdiff1;
    output += 2;
  }

  // Remainder: at most one element can be left after the unroll-by-2 loop.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    const float vdiff = *input_a - *input_b;
    *output = vdiff * vdiff;
  }
}
| 1,367
| 21.8
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, scalar,
// 4 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
void xnn_f32_vsqrdiff_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: 4 elements per iteration (fixed-trip inner loop in place of
  // manual unrolling; same arithmetic, same order).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    for (size_t i = 0; i < 4; i++) {
      const float vdiff = input_a[i] - input_b[i];
      output[i] = vdiff * vdiff;
    }
    input_a += 4;
    input_b += 4;
    output += 4;
  }

  // Remainder: 1-3 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vdiff = *input_a++ - *input_b++;
      *output++ = vdiff * vdiff;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,706
| 22.708333
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, scalar,
// 8 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
void xnn_f32_vsqrdiff_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: 8 elements per iteration (fixed-trip inner loop in place of
  // manual unrolling; same arithmetic, same order).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    for (size_t i = 0; i < 8; i++) {
      const float vdiff = input_a[i] - input_b[i];
      output[i] = vdiff * vdiff;
    }
    input_a += 8;
    input_b += 8;
    output += 8;
  }

  // Remainder: 1-7 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vdiff = *input_a++ - *input_b++;
      *output++ = vdiff * vdiff;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,294
| 23.945652
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, SSE,
// 4 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__sse_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one __m128 vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the low pair, then move the high pair down.
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,538
| 23.428571
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, SSE,
// 8 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 8 floats (two __m128 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;
    const __m128 vb0 = _mm_loadu_ps(input_b);
    const __m128 vb1 = _mm_loadu_ps(input_b + 4);
    input_b += 8;
    __m128 vacc0 = _mm_sub_ps(va0, vb0);
    __m128 vacc1 = _mm_sub_ps(va1, vb1);
    vacc0 = _mm_mul_ps(vacc0, vacc0);
    vacc1 = _mm_mul_ps(vacc1, vacc1);
    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the low pair, then move the high pair down.
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      _mm_store_ss(output, vacc);
    }
  }
}
| 2,089
| 24.180723
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, WAsm SIMD,
// 16 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 16 floats (four v128 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    v128_t vacc2 = wasm_f32x4_sub(va2, vb2);
    v128_t vacc3 = wasm_f32x4_sub(va3, vb3);
    vacc0 = wasm_f32x4_mul(vacc0, vacc0);
    vacc1 = wasm_f32x4_mul(vacc1, vacc1);
    vacc2 = wasm_f32x4_mul(vacc2, vacc2);
    vacc3 = wasm_f32x4_mul(vacc3, vacc3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,615
| 26.829787
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, WAsm SIMD,
// 4 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one v128 vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,565
| 23.46875
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiff-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference: output[i] = (input_a[i] - input_b[i])^2, f32, WAsm SIMD,
// 8 elements per main-loop iteration.
// batch is the size in BYTES and must be a nonzero multiple of sizeof(float).
// params is unused by this kernel.
// XNN_OOB_READS: the tail path loads a full 4-lane vector, which may read past
// the last valid element; only valid lanes are stored.
void xnn_f32_vsqrdiff_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 8 floats (two v128 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    vacc0 = wasm_f32x4_mul(vacc0, vacc0);
    vacc1 = wasm_f32x4_mul(vacc1, vacc1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats remain; full-vector loads may read out of bounds.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_mul(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      // Store the two low lanes, then move the high 64 bits down.
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      // Store the final single lane.
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,144
| 24.535714
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using AVX, 16 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 8 lanes.
  const __m256 vb = _mm256_broadcast_ss(input_b);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_sub_ps(vacc0, vb);
    vacc1 = _mm256_sub_ps(vacc1, vb);
    vacc0 = _mm256_mul_ps(vacc0, vacc0);
    vacc1 = _mm256_mul_ps(vacc1, vacc1);
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: "&params" had been mangled into the mojibake "¶ms" (U+00B6),
    // which does not compile. Load a mask that enables only the first
    // batch/sizeof(float) lanes, then do a masked load of the remainder.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    // Store the remaining 1-7 results in 4/2/1-float pieces.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,293
| 26.309524
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using AVX, 8 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 8 lanes.
  const __m256 vb = _mm256_broadcast_ss(input_b);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: "&params" had been mangled into the mojibake "¶ms" (U+00B6),
    // which does not compile. Load a mask that enables only the first
    // batch/sizeof(float) lanes, then do a masked load of the remainder.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_mul_ps(vacc, vacc);
    // Store the remaining 1-7 results in 4/2/1-float pieces.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 1,848
| 26.191176
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using AVX-512F, 16 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 16 lanes.
  const __m512 vb = _mm512_set1_ps(*input_b);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc0 = _mm512_sub_ps(vacc0, vb);
    vacc0 = _mm512_mul_ps(vacc0, vacc0);
    _mm512_storeu_ps(output, vacc0);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load/compute/store touch only the remaining elements.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, vb);
    vacc = _mm512_maskz_mul_ps(vmask, vacc, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,637
| 26.762712
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using AVX-512F, 32 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 16 lanes.
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: two 16-lane vectors (32 floats) per pass.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_sub_ps(vacc0, vb);
    vacc1 = _mm512_sub_ps(vacc1, vb);
    vacc0 = _mm512_mul_ps(vacc0, vacc0);
    vacc1 = _mm512_mul_ps(vacc1, vacc1);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one 16-lane vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, vb);
    vacc = _mm512_mul_ps(vacc, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load/compute/store touch only the remaining elements.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, vb);
    vacc = _mm512_maskz_mul_ps(vmask, vacc, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 2,074
| 27.424658
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using NEON, 4 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__neon_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Duplicate the scalar operand across all 4 lanes.
  const float32x4_t vb = vld1q_dup_f32(input_b);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    vst1q_f32(output, vacc); output += 4;
  }
  // Tail (1-3 floats): a full quad is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored in 2/1-float pieces.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmulq_f32(vacc, vacc);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,472
| 24.842105
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using NEON, 8 floats per main-loop iteration. batch is a byte count; the
// tail may over-read (XNN_OOB_READS) but never over-writes.
void xnn_f32_vsqrdiffc_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Duplicate the scalar operand across all 4 lanes.
  const float32x4_t vb = vld1q_dup_f32(input_b);
  // Main loop: two quad-float registers (8 floats) per pass.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t va_lo = vld1q_f32(input_a); input_a += 4;
    const float32x4_t va_hi = vld1q_f32(input_a); input_a += 4;
    float32x4_t vdiff_lo = vsubq_f32(va_lo, vb);
    float32x4_t vdiff_hi = vsubq_f32(va_hi, vb);
    vdiff_lo = vmulq_f32(vdiff_lo, vdiff_lo);
    vdiff_hi = vmulq_f32(vdiff_hi, vdiff_hi);
    vst1q_f32(output, vdiff_lo); output += 4;
    vst1q_f32(output, vdiff_hi); output += 4;
  }
  // Secondary loop: one quad at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    float32x4_t vsq = vsubq_f32(va, vb);
    vsq = vmulq_f32(vsq, vsq);
    vst1q_f32(output, vsq); output += 4;
  }
  // Tail (1-3 floats): compute a full quad, store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    float32x4_t vsq = vsubq_f32(va, vb);
    vsq = vmulq_f32(vsq, vsq);
    float32x2_t vpair = vget_low_f32(vsq);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vpair); output += 2;
      vpair = vget_high_f32(vsq);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vpair, 0);
    }
  }
}
| 1,891
| 25.647887
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar vsqrdiffc kernel: output[i] = (input_a[i] - b)^2 with b = *input_b,
// one float per step. batch is a byte count (nonzero multiple of 4).
void xnn_f32_vsqrdiffc_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vb = *input_b;
  // batch is asserted nonzero, so a do/while needs no pre-check.
  do {
    const float vdiff = *input_a++ - vb;
    *output++ = vdiff * vdiff;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 939
| 23.102564
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar vsqrdiffc kernel, unrolled by 2: output[i] = (input_a[i] - b)^2 with
// b = *input_b. batch is a byte count (nonzero multiple of 4).
void xnn_f32_vsqrdiffc_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vb = *input_b;
  // Main loop: two elements per pass.
  while (batch >= 2 * sizeof(float)) {
    const float vdiff0 = input_a[0] - vb;
    const float vdiff1 = input_a[1] - vb;
    input_a += 2;
    output[0] = vdiff0 * vdiff0;
    output[1] = vdiff1 * vdiff1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    const float vdiff = *input_a - vb;
    *output = vdiff * vdiff;
  }
}
| 1,278
| 21.839286
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar vsqrdiffc kernel, unrolled by 4: output[i] = (input_a[i] - b)^2 with
// b = *input_b. batch is a byte count (nonzero multiple of 4).
void xnn_f32_vsqrdiffc_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vb = *input_b;
  // Main loop: four elements per pass.
  while (batch >= 4 * sizeof(float)) {
    const float vdiff0 = input_a[0] - vb;
    const float vdiff1 = input_a[1] - vb;
    const float vdiff2 = input_a[2] - vb;
    const float vdiff3 = input_a[3] - vb;
    input_a += 4;
    output[0] = vdiff0 * vdiff0;
    output[1] = vdiff1 * vdiff1;
    output[2] = vdiff2 * vdiff2;
    output[3] = vdiff3 * vdiff3;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail: up to 3 leftover elements, one at a time.
  while (batch != 0) {
    const float vdiff = *input_a++ - vb;
    *output++ = vdiff * vdiff;
    batch -= sizeof(float);
  }
}
| 1,543
| 22.393939
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar vsqrdiffc kernel, 8 elements per main-loop pass:
// output[i] = (input_a[i] - b)^2 with b = *input_b. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vb = *input_b;
  // Main loop: a fixed-trip inner loop the compiler can fully unroll.
  while (batch >= 8 * sizeof(float)) {
    for (size_t i = 0; i < 8; i++) {
      const float vdiff = input_a[i] - vb;
      output[i] = vdiff * vdiff;
    }
    input_a += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail: up to 7 leftover elements, one at a time.
  while (batch != 0) {
    const float vdiff = *input_a++ - vb;
    *output++ = vdiff * vdiff;
    batch -= sizeof(float);
  }
}
| 1,991
| 23.292683
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using SSE, 4 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__sse_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 4 lanes.
  const __m128 vb = _mm_load1_ps(input_b);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): a full vector is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored in 2/1-float pieces.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,473
| 23.983051
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using SSE, 8 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the scalar operand to all 4 lanes.
  const __m128 vb = _mm_load1_ps(input_b);
  // Main loop: two 4-lane vectors (8 floats) per pass.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;
    __m128 vacc0 = _mm_sub_ps(va0, vb);
    __m128 vacc1 = _mm_sub_ps(va1, vb);
    vacc0 = _mm_mul_ps(vacc0, vacc0);
    vacc1 = _mm_mul_ps(vacc1, vacc1);
    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): a full vector is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored in 2/1-float pieces.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_mul_ps(vacc, vacc);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,907
| 24.44
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using WAsm SIMD128, 16 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all 4 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: four 4-lane vectors (16 floats) per pass.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    v128_t vy0 = wasm_f32x4_sub(va0, vb);
    v128_t vy1 = wasm_f32x4_sub(va1, vb);
    v128_t vy2 = wasm_f32x4_sub(va2, vb);
    v128_t vy3 = wasm_f32x4_sub(va3, vb);
    vy0 = wasm_f32x4_mul(vy0, vy0);
    vy1 = wasm_f32x4_mul(vy1, vy1);
    vy2 = wasm_f32x4_mul(vy2, vy2);
    vy3 = wasm_f32x4_mul(vy3, vy3);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail (1-3 floats): a full vector is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,260
| 25.6
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using WAsm SIMD128, 4 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all 4 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail (1-3 floats): a full vector is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,479
| 23.262295
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsqrdiffc-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Squared difference against a broadcast scalar: output[i] = (input_a[i] - *input_b)^2,
// using WAsm SIMD128, 8 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsqrdiffc_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Splat the scalar operand across all 4 lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: two 4-lane vectors (8 floats) per pass.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    v128_t vy0 = wasm_f32x4_sub(va0, vb);
    v128_t vy1 = wasm_f32x4_sub(va1, vb);
    vy0 = wasm_f32x4_mul(vy0, vy0);
    vy1 = wasm_f32x4_mul(vy1, vy1);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail (1-3 floats): a full vector is loaded (XNN_OOB_READS permits the
  // over-read); only the valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_sub(va, vb);
    vy = wasm_f32x4_mul(vy, vy);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,917
| 23.909091
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise subtraction with output clamping:
// output[i] = clamp(input_a[i] - input_b[i], min, max), using AVX,
// 16 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsub_minmax_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Pre-broadcast clamping bounds from the params structure.
  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_sub_ps(vacc0, _mm256_loadu_ps(input_b));
    vacc1 = _mm256_sub_ps(vacc1, _mm256_loadu_ps(input_b + 8));
    input_b += 16;
    vacc0 = _mm256_max_ps(voutput_min, vacc0);
    vacc1 = _mm256_max_ps(voutput_min, vacc1);
    vacc0 = _mm256_min_ps(voutput_max, vacc0);
    vacc1 = _mm256_min_ps(voutput_max, vacc1);
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: "&params" had been mangled into the mojibake "¶ms" (U+00B6),
    // which does not compile. Load a mask that enables only the first
    // batch/sizeof(float) lanes, then do masked loads of the remainder.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store the remaining 1-7 results in 4/2/1-float pieces.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,743
| 28.505376
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise subtraction with output clamping:
// output[i] = clamp(input_a[i] - input_b[i], min, max), using AVX,
// 8 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsub_minmax_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Pre-broadcast clamping bounds from the params structure.
  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: "&params" had been mangled into the mojibake "¶ms" (U+00B6),
    // which does not compile. Load a mask that enables only the first
    // batch/sizeof(float) lanes, then do masked loads of the remainder.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store the remaining 1-7 results in 4/2/1-float pieces.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,124
| 28.109589
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise subtraction with output clamping:
// output[i] = clamp(input_a[i] - input_b[i], min, max), using AVX-512F,
// 16 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsub_minmax_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamping bounds to all 16 lanes.
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    vacc = _mm512_max_ps(voutput_min, vacc);
    vacc = _mm512_min_ps(voutput_max, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load/compute/store touch only the remaining elements.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,906
| 29.758065
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise subtraction with output clamping:
// output[i] = clamp(input_a[i] - input_b[i], min, max), using AVX-512F,
// 32 floats per main-loop iteration. batch is a byte count.
void xnn_f32_vsub_minmax_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamping bounds to all 16 lanes.
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  // Main loop: two 16-lane vectors (32 floats) per pass.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_sub_ps(vacc0, _mm512_loadu_ps(input_b));
    vacc1 = _mm512_sub_ps(vacc1, _mm512_loadu_ps(input_b + 16));
    input_b += 32;
    vacc0 = _mm512_max_ps(voutput_min, vacc0);
    vacc1 = _mm512_max_ps(voutput_min, vacc1);
    vacc0 = _mm512_min_ps(voutput_max, vacc0);
    vacc1 = _mm512_min_ps(voutput_max, vacc1);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one 16-lane vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    vacc = _mm512_max_ps(voutput_min, vacc);
    vacc = _mm512_min_ps(voutput_max, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load/compute/store touch only the remaining elements.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 2,527
| 29.829268
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (NEON, 4 floats per
// iteration): output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data; the XNN_OOB_READS annotation on the function permits this,
// and only the valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__neon_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamping bounds to all 4 lanes.
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,766
| 27.5
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (NEON, 8 floats per
// main-loop iteration): output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamping bounds to all 4 lanes.
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  // Main loop: two 4-lane vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t va0 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4;
    const float32x4_t va1 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc0 = vsubq_f32(va0, vb0);
    float32x4_t vacc1 = vsubq_f32(va1, vb1);
    vacc0 = vmaxq_f32(vacc0, voutput_min);
    vacc1 = vmaxq_f32(vacc1, voutput_min);
    vacc0 = vminq_f32(vacc0, voutput_max);
    vacc1 = vminq_f32(vacc1, voutput_max);
    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 2,438
| 29.111111
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, scalar, one float per
// step: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  // Iterate over elements rather than bytes; the assertions above guarantee
  // that batch is an exact multiple of sizeof(float).
  const size_t count = batch / sizeof(float);
  for (size_t i = 0; i < count; i++) {
    float vresult = input_a[i] - input_b[i];
    vresult = math_max_f32(vresult, vmin);
    vresult = math_min_f32(vresult, vmax);
    output[i] = vresult;
  }
}
| 1,103
| 25.285714
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, scalar, two floats per
// main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  // Main loop: a pair of elements per iteration.
  while (batch >= 2 * sizeof(float)) {
    const float vdiff0 = input_a[0] - input_b[0];
    const float vdiff1 = input_a[1] - input_b[1];
    output[0] = math_min_f32(math_max_f32(vdiff0, vmin), vmax);
    output[1] = math_min_f32(math_max_f32(vdiff1, vmin), vmax);
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain.
  if (batch != 0) {
    assert(batch == sizeof(float));
    const float vdiff = *input_a - *input_b;
    *output = math_min_f32(math_max_f32(vdiff, vmin), vmax);
  }
}
| 1,660
| 24.166667
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, scalar, four floats per
// main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // Main loop: four independent element computations per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;
    float vacc0 = va0 - vb0;
    float vacc1 = va1 - vb1;
    float vacc2 = va2 - vb2;
    float vacc3 = va3 - vb3;
    vacc0 = math_max_f32(vacc0, voutput_min);
    vacc1 = math_max_f32(vacc1, voutput_min);
    vacc2 = math_max_f32(vacc2, voutput_min);
    vacc3 = math_max_f32(vacc3, voutput_min);
    vacc0 = math_min_f32(vacc0, voutput_max);
    vacc1 = math_min_f32(vacc1, voutput_max);
    vacc2 = math_min_f32(vacc2, voutput_max);
    vacc3 = math_min_f32(vacc3, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 elements, processed one at a time.
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va - vb;
      vacc = math_max_f32(vacc, voutput_min);
      vacc = math_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,131
| 25.65
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, scalar, eight floats per
// main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // Main loop: eight independent element computations per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;
    float vacc0 = va0 - vb0;
    float vacc1 = va1 - vb1;
    float vacc2 = va2 - vb2;
    float vacc3 = va3 - vb3;
    float vacc4 = va4 - vb4;
    float vacc5 = va5 - vb5;
    float vacc6 = va6 - vb6;
    float vacc7 = va7 - vb7;
    vacc0 = math_max_f32(vacc0, voutput_min);
    vacc1 = math_max_f32(vacc1, voutput_min);
    vacc2 = math_max_f32(vacc2, voutput_min);
    vacc3 = math_max_f32(vacc3, voutput_min);
    vacc4 = math_max_f32(vacc4, voutput_min);
    vacc5 = math_max_f32(vacc5, voutput_min);
    vacc6 = math_max_f32(vacc6, voutput_min);
    vacc7 = math_max_f32(vacc7, voutput_min);
    vacc0 = math_min_f32(vacc0, voutput_max);
    vacc1 = math_min_f32(vacc1, voutput_max);
    vacc2 = math_min_f32(vacc2, voutput_max);
    vacc3 = math_min_f32(vacc3, voutput_max);
    vacc4 = math_min_f32(vacc4, voutput_max);
    vacc5 = math_min_f32(vacc5, voutput_max);
    vacc6 = math_min_f32(vacc6, voutput_max);
    vacc7 = math_min_f32(vacc7, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-7 elements, processed one at a time.
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va - vb;
      vacc = math_max_f32(vacc, voutput_min);
      vacc = math_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,979
| 27.653846
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (SSE, 4 floats per
// iteration): output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__sse_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Aligned 4-float loads of the clamping bounds from the SSE params layout.
  const __m128 voutput_min = _mm_load_ps(params->sse.min);
  const __m128 voutput_max = _mm_load_ps(params->sse.max);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,756
| 25.223881
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (SSE, 8 floats per
// main-loop iteration): output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Aligned 4-float loads of the clamping bounds from the SSE params layout.
  const __m128 voutput_min = _mm_load_ps(params->sse.min);
  const __m128 voutput_max = _mm_load_ps(params->sse.max);
  // Main loop: two 4-lane vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;
    const __m128 vb0 = _mm_loadu_ps(input_b);
    const __m128 vb1 = _mm_loadu_ps(input_b + 4);
    input_b += 8;
    __m128 vacc0 = _mm_sub_ps(va0, vb0);
    __m128 vacc1 = _mm_sub_ps(va1, vb1);
    vacc0 = _mm_max_ps(vacc0, voutput_min);
    vacc1 = _mm_max_ps(vacc1, voutput_min);
    vacc0 = _mm_min_ps(vacc0, voutput_max);
    vacc1 = _mm_min_ps(vacc1, voutput_max);
    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_sub_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 2,408
| 25.766667
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, WAsm scalar, one float
// per step: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  // Iterate over elements rather than bytes; the assertions above guarantee
  // that batch is an exact multiple of sizeof(float).
  const size_t count = batch / sizeof(float);
  for (size_t i = 0; i < count; i++) {
    float vresult = input_a[i] - input_b[i];
    vresult = __builtin_wasm_max_f32(vresult, vmin);
    vresult = __builtin_wasm_min_f32(vresult, vmax);
    output[i] = vresult;
  }
}
| 1,121
| 25.714286
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, WAsm scalar, two floats
// per main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // Main loop: two independent element computations per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    input_a += 2;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    input_b += 2;
    float vacc0 = va0 - vb0;
    float vacc1 = va1 - vb1;
    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
  }
  if XNN_UNLIKELY(batch != 0) {
    // At most one element can remain.
    assert(batch == sizeof(float));
    const float va = *input_a;
    const float vb = *input_b;
    float vacc = va - vb;
    vacc = __builtin_wasm_max_f32(vacc, voutput_min);
    vacc = __builtin_wasm_min_f32(vacc, voutput_max);
    *output = vacc;
  }
}
| 1,718
| 25.045455
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, WAsm scalar, four floats
// per main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // Main loop: four independent element computations per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;
    float vacc0 = va0 - vb0;
    float vacc1 = va1 - vb1;
    float vacc2 = va2 - vb2;
    float vacc3 = va3 - vb3;
    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 elements, processed one at a time.
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va - vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,229
| 26.875
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping, WAsm scalar, eight floats
// per main-loop iteration: output[i] = clamp(input_a[i] - input_b[i], min, max).
// 'batch' is the size of each input in bytes; it must be a non-zero multiple
// of sizeof(float).
void xnn_f32_vsub_minmax_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // Main loop: eight independent element computations per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;
    float vacc0 = va0 - vb0;
    float vacc1 = va1 - vb1;
    float vacc2 = va2 - vb2;
    float vacc3 = va3 - vb3;
    float vacc4 = va4 - vb4;
    float vacc5 = va5 - vb5;
    float vacc6 = va6 - vb6;
    float vacc7 = va7 - vb7;
    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
    vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
    vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
    vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
    vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
    vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
    vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
    vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
    vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-7 elements, processed one at a time.
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va - vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 3,157
| 29.365385
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (WAsm SIMD, ARM-style
// min/max, 16 floats per main-loop iteration):
//   output[i] = clamp(input_a[i] - input_b[i], min, max)
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_arm_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Bounds are stored as a 64-bit (two-float) pattern that is splatted
  // across the 128-bit vector.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  // Main loop: four 4-lane vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    v128_t vacc2 = wasm_f32x4_sub(va2, vb2);
    v128_t vacc3 = wasm_f32x4_sub(va3, vb3);
    vacc0 = wasm_f32x4_max(vacc0, voutput_min);
    vacc1 = wasm_f32x4_max(vacc1, voutput_min);
    vacc2 = wasm_f32x4_max(vacc2, voutput_min);
    vacc3 = wasm_f32x4_max(vacc3, voutput_min);
    vacc0 = wasm_f32x4_min(vacc0, voutput_max);
    vacc1 = wasm_f32x4_min(vacc1, voutput_max);
    vacc2 = wasm_f32x4_min(vacc2, voutput_max);
    vacc3 = wasm_f32x4_min(vacc3, voutput_max);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 3,094
| 29.048544
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (WAsm SIMD, ARM-style
// min/max, 4 floats per iteration):
//   output[i] = clamp(input_a[i] - input_b[i], min, max)
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_arm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Bounds are stored as a 64-bit (two-float) pattern that is splatted
  // across the 128-bit vector.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,827
| 25.882353
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 elementwise subtraction with output clamping (WAsm SIMD, ARM-style
// min/max, 8 floats per main-loop iteration):
//   output[i] = clamp(input_a[i] - input_b[i], min, max)
// 'batch' is in bytes and must be a non-zero multiple of sizeof(float).
// The remainder path loads a full 4-lane vector that may extend past the
// valid data (permitted by XNN_OOB_READS); only valid lanes are stored.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_arm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Bounds are stored as a 64-bit (two-float) pattern that is splatted
  // across the 128-bit vector.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  // Main loop: two 4-lane vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    vacc0 = wasm_f32x4_max(vacc0, voutput_min);
    vacc1 = wasm_f32x4_max(vacc1, voutput_min);
    vacc0 = wasm_f32x4_min(vacc0, voutput_max);
    vacc1 = wasm_f32x4_min(vacc1, voutput_max);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-lane vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: compute a full vector, store valid lanes only.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,515
| 26.648352
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction with output clamping: output = clamp(a - b, min, max).
// WASM SIMD variant tuned for x86; processes 16 floats per main-loop iteration.
// Uses pmax/pmin (pseudo-max/min, b < a ? ... semantics per the WASM SIMD spec),
// which lower to single SSE min/max instructions; the operand order (bound first)
// means a NaN accumulator resolves to the clamp bound rather than propagating.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds into all four lanes via a 64-bit splat of a
  // duplicated scalar (assumes the XNNPACK wasmsimd params layout).
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: four 128-bit vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    v128_t vacc2 = wasm_f32x4_sub(va2, vb2);
    v128_t vacc3 = wasm_f32x4_sub(va3, vb3);
    vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
    vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
    vacc2 = wasm_f32x4_pmax(voutput_min, vacc2);
    vacc3 = wasm_f32x4_pmax(voutput_min, vacc3);
    vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
    vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
    vacc2 = wasm_f32x4_pmin(voutput_max, vacc2);
    vacc3 = wasm_f32x4_pmin(voutput_max, vacc3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 3,106
| 29.165049
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction with output clamping: output = clamp(a - b, min, max).
// WASM SIMD variant tuned for x86; 4 floats (one 128-bit vector) per iteration.
// pmax/pmin operand order (bound first) resolves NaN results to the clamp bound.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_x86_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast clamp bounds into all lanes (assumes the duplicated-scalar
  // XNNPACK wasmsimd params layout).
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,831
| 25.941176
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-minmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction with output clamping: output = clamp(a - b, min, max).
// WASM SIMD variant tuned for x86; 8 floats (two vectors) per main-loop pass.
// pmax/pmin operand order (bound first) resolves NaN results to the clamp bound.
void xnn_f32_vsub_minmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast clamp bounds into all lanes (assumes the duplicated-scalar
  // XNNPACK wasmsimd params layout).
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: two 128-bit vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
    vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
    vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
    vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,523
| 26.736264
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar subtract-then-ReLU kernel: output[i] = max(input_a[i] - input_b[i], 0),
// one float per iteration. `batch` is the input size in BYTES and must be a
// nonzero multiple of sizeof(float).
void xnn_f32_vsub_relu_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // batch is guaranteed nonzero and whole, so a do/while over the element
  // count is equivalent to the byte-countdown loop.
  size_t count = batch / sizeof(float);
  do {
    const float difference = *input_a++ - *input_b++;
    *output++ = math_max_f32(difference, 0.0f);
  } while (--count != 0);
}
| 952
| 23.435897
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar subtract-then-ReLU kernel, unrolled by 2:
// output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: two elements per pass.
  while (batch >= 2 * sizeof(float)) {
    const float diff0 = input_a[0] - input_b[0];
    const float diff1 = input_a[1] - input_b[1];
    output[0] = math_max_f32(diff0, 0.0f);
    output[1] = math_max_f32(diff1, 0.0f);
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = math_max_f32(*input_a - *input_b, 0.0f);
  }
}
| 1,402
| 22.383333
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar subtract-then-ReLU kernel, processing 4 elements per main-loop pass:
// output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 4 * sizeof(float)) {
    for (size_t i = 0; i < 4; i++) {
      output[i] = math_max_f32(input_a[i] - input_b[i], 0.0f);
    }
    input_a += 4;
    input_b += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail: 1-3 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = math_max_f32(*input_a++ - *input_b++, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,765
| 23.527778
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar subtract-then-ReLU kernel, processing 8 elements per main-loop pass:
// output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 8 * sizeof(float)) {
    for (size_t i = 0; i < 8; i++) {
      output[i] = math_max_f32(input_a[i] - input_b[i], 0.0f);
    }
    input_a += 8;
    input_b += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail: 1-7 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = math_max_f32(*input_a++ - *input_b++, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,401
| 25.108696
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WASM-scalar subtract-then-ReLU kernel: output[i] = max(input_a[i] - input_b[i], 0),
// one float per iteration, using the wasm max builtin. `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // batch is guaranteed nonzero and whole, so a do/while over the element
  // count is equivalent to the byte-countdown loop.
  size_t count = batch / sizeof(float);
  do {
    const float difference = *input_a++ - *input_b++;
    *output++ = __builtin_wasm_max_f32(difference, 0.0f);
  } while (--count != 0);
}
| 960
| 23.641026
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WASM-scalar subtract-then-ReLU kernel, unrolled by 2:
// output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: two elements per pass.
  while (batch >= 2 * sizeof(float)) {
    const float diff0 = input_a[0] - input_b[0];
    const float diff1 = input_a[1] - input_b[1];
    output[0] = __builtin_wasm_max_f32(diff0, 0.0f);
    output[1] = __builtin_wasm_max_f32(diff1, 0.0f);
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = __builtin_wasm_max_f32(*input_a - *input_b, 0.0f);
  }
}
| 1,430
| 22.85
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WASM-scalar subtract-then-ReLU kernel, processing 4 elements per main-loop
// pass: output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 4 * sizeof(float)) {
    for (size_t i = 0; i < 4; i++) {
      output[i] = __builtin_wasm_max_f32(input_a[i] - input_b[i], 0.0f);
    }
    input_a += 4;
    input_b += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail: 1-3 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = __builtin_wasm_max_f32(*input_a++ - *input_b++, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,813
| 24.194444
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WASM-scalar subtract-then-ReLU kernel, processing 8 elements per main-loop
// pass: output[i] = max(input_a[i] - input_b[i], 0). `batch` is in BYTES.
void xnn_f32_vsub_relu_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 8 * sizeof(float)) {
    for (size_t i = 0; i < 8; i++) {
      output[i] = __builtin_wasm_max_f32(input_a[i] - input_b[i], 0.0f);
    }
    input_a += 8;
    input_b += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail: 1-7 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = __builtin_wasm_max_f32(*input_a++ - *input_b++, 0.0f);
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,489
| 26.065217
| 73
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Vectorized subtract-then-ReLU: output = max(a - b, 0), 16 floats per
// main-loop pass. ReLU is implemented with a SIGNED INTEGER max against zero:
// negative floats have the sign bit set and therefore compare below 0 as
// int32, while non-negative floats (including +NaN bit patterns) pass through.
void xnn_f32_vsub_relu_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  // Main loop: four 128-bit vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    v128_t vacc2 = wasm_f32x4_sub(va2, vb2);
    v128_t vacc3 = wasm_f32x4_sub(va3, vb3);
    vacc0 = wasm_i32x4_max(vacc0, vzero);
    vacc1 = wasm_i32x4_max(vacc1, vzero);
    vacc2 = wasm_i32x4_max(vacc2, vzero);
    vacc3 = wasm_i32x4_max(vacc3, vzero);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,665
| 27.063158
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Vectorized subtract-then-ReLU: output = max(a - b, 0), 4 floats per pass.
// ReLU uses signed integer max against zero (negative floats have the sign
// bit set and compare below 0 as int32).
void xnn_f32_vsub_relu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,615
| 23.861538
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Vectorized subtract-then-ReLU: output = max(a - b, 0), 8 floats per
// main-loop pass. ReLU uses signed integer max against zero (negative floats
// have the sign bit set and compare below 0 as int32).
void xnn_f32_vsub_relu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  // Main loop: two 128-bit vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    vacc0 = wasm_i32x4_max(vacc0, vzero);
    vacc1 = wasm_i32x4_max(vacc1, vzero);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail (1-3 floats): full-vector loads (XNN_OOB_READS), lane-wise stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,194
| 24.823529
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar elementwise subtraction: output[i] = input_a[i] - input_b[i],
// one float per iteration. `batch` is the input size in BYTES and must be a
// nonzero multiple of sizeof(float).
void xnn_f32_vsub_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // batch is guaranteed nonzero and whole, so a do/while over the element
  // count is equivalent to the byte-countdown loop.
  size_t count = batch / sizeof(float);
  do {
    *output++ = *input_a++ - *input_b++;
  } while (--count != 0);
}
| 913
| 23.052632
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar elementwise subtraction, unrolled by 2:
// output[i] = input_a[i] - input_b[i]. `batch` is in BYTES.
void xnn_f32_vsub_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: two elements per pass.
  while (batch >= 2 * sizeof(float)) {
    output[0] = input_a[0] - input_b[0];
    output[1] = input_a[1] - input_b[1];
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = *input_a - *input_b;
  }
}
| 1,285
| 21.561404
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar elementwise subtraction, processing 4 elements per main-loop pass:
// output[i] = input_a[i] - input_b[i]. `batch` is in BYTES.
void xnn_f32_vsub_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 4 * sizeof(float)) {
    for (size_t i = 0; i < 4; i++) {
      output[i] = input_a[i] - input_b[i];
    }
    input_a += 4;
    input_b += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Tail: 1-3 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = *input_a++ - *input_b++;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,568
| 22.41791
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar elementwise subtraction, processing 8 elements per main-loop pass:
// output[i] = input_a[i] - input_b[i]. `batch` is in BYTES.
void xnn_f32_vsub_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: a fixed-trip inner loop replaces the hand-unrolled body.
  while (batch >= 8 * sizeof(float)) {
    for (size_t i = 0; i < 8; i++) {
      output[i] = input_a[i] - input_b[i];
    }
    input_a += 8;
    input_b += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  // Tail: 1-7 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      *output++ = *input_a++ - *input_b++;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,048
| 23.686747
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction: output[i] = input_a[i] - input_b[i].
// WAsm SIMD kernel: 16 floats (4 vectors) per main-loop iteration, then 4 at
// a time, then a masked-store tail.  Marked XNN_OOB_READS because the tail
// path loads a full 128-bit vector even when only 1-3 floats remain, so the
// inputs must be readable slightly past the end of the batch.
void xnn_f32_vsub_ukernel__wasmsimd_x16(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 4 vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    v128_t vacc2 = wasm_f32x4_sub(va2, vb2);
    v128_t vacc3 = wasm_f32x4_sub(va3, vb3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats.  Full-vector loads here read past the end of the
  // inputs (see XNN_OOB_READS); only the valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);  // move high half to lane 0
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,365
| 25.886364
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction: output[i] = input_a[i] - input_b[i].
// WAsm SIMD kernel: 4 floats per iteration plus a masked-store tail.  Marked
// XNN_OOB_READS: the tail loads a full 128-bit vector even for 1-3 leftover
// floats, so inputs must be readable slightly past the end of the batch.
void xnn_f32_vsub_ukernel__wasmsimd_x4(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats; compute a full vector, store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);  // move high half to lane 0
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,483
| 22.935484
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsub-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise f32 subtraction: output[i] = input_a[i] - input_b[i].
// WAsm SIMD kernel: 8 floats (2 vectors) per main-loop iteration, then 4 at
// a time, then a masked-store tail.  Marked XNN_OOB_READS: the tail loads a
// full 128-bit vector for 1-3 leftover floats, so inputs must be readable
// slightly past the end of the batch.
void xnn_f32_vsub_ukernel__wasmsimd_x8(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 2 vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_sub(va0, vb0);
    v128_t vacc1 = wasm_f32x4_sub(va1, vb1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_sub(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 floats; compute a full vector, store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_sub(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);  // move high half to lane 0
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,978
| 23.7375
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  AVX kernel:
// 16 floats per main-loop iteration, then 8, then a mask-load/partial-store
// tail (no out-of-bounds memory accesses).
// Fix: the mask-table address expression was garbled ("&para;ms" mojibake for
// "&params"), which did not compile; restored to &params->avx.mask_table[7].
void xnn_f32_vsubc_minmax_ukernel__avx_x16(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  // Main loop: 2 YMM vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_sub_ps(vacc0, vb);
    vacc1 = _mm256_sub_ps(vacc1, vb);
    vacc0 = _mm256_max_ps(voutput_min, vacc0);
    vacc1 = _mm256_max_ps(voutput_min, vacc1);
    vacc0 = _mm256_min_ps(voutput_max, vacc0);
    vacc1 = _mm256_min_ps(voutput_max, vacc1);
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // Secondary loop: one YMM vector (8 floats) at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Tail: 1-7 floats.  Biasing the mask-table address by -batch bytes yields
  // a mask with exactly batch/sizeof(float) enabled lanes for the mask-load.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store 4 / 2 / 1 valid floats, shifting the remaining lanes down.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,630
| 27.912088
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  AVX kernel:
// 8 floats per main-loop iteration plus a mask-load/partial-store tail.
// Fix: the mask-table address expression was garbled ("&para;ms" mojibake for
// "&params"), which did not compile; restored to &params->avx.mask_table[7].
void xnn_f32_vsubc_minmax_ukernel__avx_x8(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  // Main loop: one YMM vector (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Tail: 1-7 floats.  Biasing the mask-table address by -batch bytes yields
  // a mask with exactly batch/sizeof(float) enabled lanes for the mask-load.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_sub_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store 4 / 2 / 1 valid floats, shifting the remaining lanes down.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,078
| 27.875
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  AVX-512F kernel:
// 16 floats per main-loop iteration, with a predicated (masked) tail so no
// out-of-bounds memory is touched.
void xnn_f32_vsubc_minmax_ukernel__avx512f_x16(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: one ZMM vector (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc0 = _mm512_sub_ps(vacc0, vb);
    vacc0 = _mm512_max_ps(voutput_min, vacc0);
    vacc0 = _mm512_min_ps(voutput_max, vacc0);
    _mm512_storeu_ps(output, vacc0);
    output += 16;
  }
  // Tail: 1-15 floats, handled with a lane mask of (1 << count) - 1.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, vb);
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,888
| 28.515625
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  AVX-512F kernel:
// 32 floats per main-loop iteration, then 16, with a predicated (masked)
// tail so no out-of-bounds memory is touched.
void xnn_f32_vsubc_minmax_ukernel__avx512f_x32(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: 2 ZMM vectors (32 floats) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_sub_ps(vacc0, vb);
    vacc1 = _mm512_sub_ps(vacc1, vb);
    vacc0 = _mm512_max_ps(voutput_min, vacc0);
    vacc1 = _mm512_max_ps(voutput_min, vacc1);
    vacc0 = _mm512_min_ps(voutput_max, vacc0);
    vacc1 = _mm512_min_ps(voutput_max, vacc1);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one ZMM vector (16 floats) at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_sub_ps(vacc, vb);
    vacc = _mm512_max_ps(voutput_min, vacc);
    vacc = _mm512_min_ps(voutput_max, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  // Tail: 1-15 floats, handled with a lane mask of (1 << count) - 1.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_sub_ps(vmask, vacc, vb);
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 2,430
| 29.3875
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  NEON kernel:
// 4 floats per main-loop iteration.  Marked XNN_OOB_READS: the tail loads a
// full 128-bit vector for 1-3 leftover floats, so input_a must be readable
// slightly past the end of the batch.
// Fix: the clamp-bound loads were garbled ("&para;ms" mojibake for
// "&params"), which did not compile; restored to &params->scalar.min/.max.
void xnn_f32_vsubc_minmax_ukernel__neon_x4(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vb = vld1q_dup_f32(input_b);
  // Main loop: one quad vector (4 floats) per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    vst1q_f32(output, vacc); output += 4;
  }
  // Tail: 1-3 floats; full-vector load (see XNN_OOB_READS), partial store.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,710
| 27.04918
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  NEON kernel:
// 8 floats (2 vectors) per main-loop iteration, then 4, then a partial-store
// tail.  Marked XNN_OOB_READS: the tail loads a full 128-bit vector for 1-3
// leftover floats, so input_a must be readable slightly past the batch end.
// Fixes: the clamp-bound loads were garbled ("&para;ms" mojibake for
// "&params") and did not compile; the main-loop accumulators were garbled as
// "vacc_"/"vaccl" and are renamed vacc0/vacc1, consistent with the sibling
// AVX/WAsm kernels in this directory.
void xnn_f32_vsubc_minmax_ukernel__neon_x8(
    size_t batch,  // batch size in BYTES; non-zero multiple of sizeof(float)
    const float* input_a,
    const float* input_b,  // points to the single scalar subtrahend
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vb = vld1q_dup_f32(input_b);
  // Main loop: 2 quad vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vacc0 = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc1 = vld1q_f32(input_a); input_a += 4;
    vacc0 = vsubq_f32(vacc0, vb);
    vacc1 = vsubq_f32(vacc1, vb);
    vacc0 = vmaxq_f32(vacc0, voutput_min);
    vacc1 = vmaxq_f32(vacc1, voutput_min);
    vacc0 = vminq_f32(vacc0, voutput_max);
    vacc1 = vminq_f32(vacc1, voutput_max);
    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
  }
  // Secondary loop: one quad vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    vst1q_f32(output, vacc); output += 4;
  }
  // Tail: 1-3 floats; full-vector load (see XNN_OOB_READS), partial store.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    float32x4_t vacc = vsubq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 2,228
| 27.576923
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  Scalar kernel,
// one element per iteration.
//   batch - size in BYTES; must be a non-zero multiple of sizeof(float)
void xnn_f32_vsubc_minmax_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  const float vsubtrahend = *input_b;  // broadcast scalar
  size_t count = batch / sizeof(float);
  do {
    float vresult = *input_a++ - vsubtrahend;
    vresult = math_max_f32(vresult, vmin);
    vresult = math_min_f32(vresult, vmax);
    *output++ = vresult;
  } while (--count != 0);
}
| 1,101
| 25.238095
| 75
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes output[i] = clamp(input_a[i] - *input_b, min, max), broadcasting
// the single scalar subtrahend *input_b across the batch.  Scalar kernel,
// unrolled to 2 elements per main-loop iteration.
//   batch - size in BYTES; must be a non-zero multiple of sizeof(float)
void xnn_f32_vsubc_minmax_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  const float vsubtrahend = *input_b;  // broadcast scalar
  // Main loop: 2 elements per iteration.
  while (batch >= 2 * sizeof(float)) {
    float vy0 = input_a[0] - vsubtrahend;
    float vy1 = input_a[1] - vsubtrahend;
    input_a += 2;
    vy0 = math_min_f32(math_max_f32(vy0, vmin), vmax);
    vy1 = math_min_f32(math_max_f32(vy1, vmin), vmax);
    output[0] = vy0;
    output[1] = vy1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // Remainder: at most one element.
  if (batch != 0) {
    assert(batch == sizeof(float));
    const float vy = math_min_f32(math_max_f32(*input_a - vsubtrahend, vmin), vmax);
    *output = vy;
  }
}
| 1,571
| 24.354839
| 75
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.