| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
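// This microkernel computes tanh(x) elementwise via the identity
// tanh(-|x|) = emo / (emo + 2), where emo = expm1(-2|x|). The exponential is
// range-reduced with the magic-bias rounding trick (any factor of 2 is folded
// into the loaded log2e/minus_ln2 constants), approximated by a degree-6
// polynomial in the reduced argument, and the final rational step uses a
// full-precision division (the "div" variant). The sign of x is restored at
// the end with an XOR.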
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
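    // z = -|x|: setting the sign bit makes the argument non-positive; the XOR
    // of x and z records whether the sign must be flipped back at the end.
    // Clamping z at sat_cutoff saturates very large |x|, where tanh rounds
    // to +-1.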
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
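    // n = round(z * log2e), computed by adding a large "magic bias" so the
    // rounding happens in the mantissa; shifting n's low bits into the
    // exponent field reconstructs the scale s = 2**n, and t = z + n*minus_ln2
    // is the reduced argument.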
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
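    // Evaluate p(t) = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t + 2 by Horner's
    // scheme; the trailing "+ 2" is obtained by subtracting minus_two.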
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
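    // Reconstruct emo ~= expm1(-2|x|) as p*(t*s) + (s - 1), then
    // epo = emo + 2 ~= exp(-2|x|) + 1.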
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
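    // tanh(z) = emo / epo; XORing with the saved sign mask turns this into
    // tanh(x).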
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
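  // Tail of 1-3 elements: one more (possibly overreading, hence the
  // XNN_OOB_READS annotation) vector is computed and only the valid lanes
  // are stored.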
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 6,546 | 34.389189 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
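// Same tanh algorithm as the "div" variant above, except that the reciprocal
// of (emo + 2) is approximated with _mm_rcp_ps and refined by one
// Newton-Raphson step ("nr1"), and saturation is handled by a compare mask
// plus blend instead of clamping the input.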
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
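    // Lanes with z <= sat_cutoff saturate; the mask later selects exactly -1
    // for them.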
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
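    // Force saturated lanes to exactly -1, then restore the sign of x.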
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 8,769 | 38.327354 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
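// Same "nr1" tanh algorithm as above, unrolled to 16 elements per main-loop
// iteration.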
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,182 | 40.394309 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
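// Same "nr1" tanh algorithm as above, unrolled to 20 elements per main-loop
// iteration.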
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
const __m128 vmGHIJ = _mm_cmple_ps(vzGHIJ, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
const __m128 vepoGHIJ = _mm_sub_ps(vminus_two, vemoGHIJ);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
__m128 vrepoGHIJ = _mm_rcp_ps(vepoGHIJ);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_add_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
__m128 vyGHIJ = _mm_mul_ps(vemoGHIJ, vrepoGHIJ);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vyGHIJ = _mm_blendv_ps(vyGHIJ, vminus_one, vmGHIJ);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 11,595 | 42.107807 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
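// Same "nr1" tanh algorithm as above, unrolled to 24 elements per main-loop
// iteration.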
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vinvsignxKLMN = _mm_xor_ps(vxKLMN, vzKLMN);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
const __m128 vmGHIJ = _mm_cmple_ps(vzGHIJ, vsat_cutoff);
const __m128 vmKLMN = _mm_cmple_ps(vzKLMN, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
const __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2), vzKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
vpKLMN = _mm_sub_ps(_mm_mul_ps(vpKLMN, vtKLMN), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vtsKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
const __m128 vsmoKLMN = _mm_add_ps(vsKLMN, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vemoKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtsKLMN), vsmoKLMN);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
const __m128 vepoGHIJ = _mm_sub_ps(vminus_two, vemoGHIJ);
const __m128 vepoKLMN = _mm_sub_ps(vminus_two, vemoKLMN);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
__m128 vrepoGHIJ = _mm_rcp_ps(vepoGHIJ);
__m128 vrepoKLMN = _mm_rcp_ps(vepoKLMN);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_add_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
vrepoKLMN = _mm_mul_ps(vrepoKLMN, _mm_add_ps(_mm_mul_ps(vrepoKLMN, vepoKLMN), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
__m128 vyGHIJ = _mm_mul_ps(vemoGHIJ, vrepoGHIJ);
__m128 vyKLMN = _mm_mul_ps(vemoKLMN, vrepoKLMN);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vyGHIJ = _mm_blendv_ps(vyGHIJ, vminus_one, vmGHIJ);
vyKLMN = _mm_blendv_ps(vyKLMN, vminus_one, vmKLMN);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
vyKLMN = _mm_xor_ps(vyKLMN, vinvsignxKLMN);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 13,008 | 43.55137 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
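// Same "nr1" tanh algorithm, with the main loop processing a single vector of
// 4 elements (no extra unrolling).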
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,420 | 31.748148 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr1-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
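// Same "nr1" tanh algorithm as above, unrolled to 8 elements per main-loop
// iteration.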
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr1_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,353 | 35.77 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
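    // Two Newton-Raphson refinements of RCPPS: the first update, r*(r*e - 2),
    // flips the sign toward 1/(emo + 2); the second, r*(r*e + 2), refines it
    // without flipping again.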
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 9,217 | 39.429825 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_sub_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,726 | 41.56746 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
const __m128 vmGHIJ = _mm_cmple_ps(vzGHIJ, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
const __m128 vepoGHIJ = _mm_sub_ps(vminus_two, vemoGHIJ);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
__m128 vrepoGHIJ = _mm_rcp_ps(vepoGHIJ);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_add_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_sub_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_sub_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
__m128 vyGHIJ = _mm_mul_ps(vemoGHIJ, vrepoGHIJ);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vyGHIJ = _mm_blendv_ps(vyGHIJ, vminus_one, vmGHIJ);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 12,235 | 43.333333 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vinvsignxKLMN = _mm_xor_ps(vxKLMN, vzKLMN);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
const __m128 vmGHIJ = _mm_cmple_ps(vzGHIJ, vsat_cutoff);
const __m128 vmKLMN = _mm_cmple_ps(vzKLMN, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
const __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2), vzKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
vpKLMN = _mm_sub_ps(_mm_mul_ps(vpKLMN, vtKLMN), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vtsKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
const __m128 vsmoKLMN = _mm_add_ps(vsKLMN, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vemoKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtsKLMN), vsmoKLMN);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
const __m128 vepoGHIJ = _mm_sub_ps(vminus_two, vemoGHIJ);
const __m128 vepoKLMN = _mm_sub_ps(vminus_two, vemoKLMN);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
__m128 vrepoGHIJ = _mm_rcp_ps(vepoGHIJ);
__m128 vrepoKLMN = _mm_rcp_ps(vepoKLMN);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_add_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
vrepoKLMN = _mm_mul_ps(vrepoKLMN, _mm_add_ps(_mm_mul_ps(vrepoKLMN, vepoKLMN), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_sub_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepoGHIJ = _mm_mul_ps(vrepoGHIJ, _mm_sub_ps(_mm_mul_ps(vrepoGHIJ, vepoGHIJ), vminus_two));
vrepoKLMN = _mm_mul_ps(vrepoKLMN, _mm_sub_ps(_mm_mul_ps(vrepoKLMN, vepoKLMN), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
__m128 vyGHIJ = _mm_mul_ps(vemoGHIJ, vrepoGHIJ);
__m128 vyKLMN = _mm_mul_ps(vemoKLMN, vrepoKLMN);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy89AB = _mm_blendv_ps(vy89AB, vminus_one, vm89AB);
vyCDEF = _mm_blendv_ps(vyCDEF, vminus_one, vmCDEF);
vyGHIJ = _mm_blendv_ps(vyGHIJ, vminus_one, vmGHIJ);
vyKLMN = _mm_blendv_ps(vyKLMN, vminus_one, vmKLMN);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
vyKLMN = _mm_xor_ps(vyKLMN, vinvsignxKLMN);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 13,744 | 44.816667 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
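  // With a 4-float tile, the main loop below coincides with the 4-wide
  // remainder loop of the wider variants in this kernel family.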
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,580 | 32.437956 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-nr2-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_nr2_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
vy0123 = _mm_blendv_ps(vy0123, vminus_one, vm0123);
vy4567 = _mm_blendv_ps(vy4567, vminus_one, vm4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_blendv_ps(vy, vminus_one, vm);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,705 | 36.77451 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-lut8-p4h3ts-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_lut8_p4h3ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
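  // Per element: with z = min(|x|, sat_cutoff), the kernel evaluates
  // emo ~= expm1(-2*z) (hence the "expm1minus" name): the magic-bias trick
  // rounds z*minus_log2e to a multiple of 1/8, the low 3 bits of vb index an
  // 8-entry table of 2^(-k/8), the remaining bits rebuild the power-of-two
  // scale vs, and a degree-4 polynomial in the residual vt supplies the
  // fractional correction. Since emo/(emo + 2) == -tanh(z), the final
  // copysignf yields tanh(x).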
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-lut8-p4h3ts-div-x2.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_lut8_p4h3ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
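  // Same lut8_p4h3 scheme as the x1 kernel, unrolled 2 elements wide; a
  // scalar pass handles the single leftover element.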
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = __builtin_wasm_min_f32(vz0, vsat_cutoff);
vz1 = __builtin_wasm_min_f32(vz1, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
ve0 += vl0;
ve1 += vl1;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
float vp0 = vc4 * vt0 + vc3;
float vp1 = vc4 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-lut8-p4h3ts-div-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_lut8_p4h3ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
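  // Same lut8_p4h3 scheme, unrolled 4 elements wide; leftovers drain through
  // a scalar do-while loop.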
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = __builtin_wasm_min_f32(vz0, vsat_cutoff);
vz1 = __builtin_wasm_min_f32(vz1, vsat_cutoff);
vz2 = __builtin_wasm_min_f32(vz2, vsat_cutoff);
vz3 = __builtin_wasm_min_f32(vz3, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vidx2 = vb2 & vindex_mask;
const uint32_t vidx3 = vb3 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
const uint32_t vl2 = xnn_table_exp2minus_k_over_8[vidx2];
uint32_t ve2 = vb2 << 20;
const uint32_t vl3 = xnn_table_exp2minus_k_over_8[vidx3];
uint32_t ve3 = vb3 << 20;
ve0 += vl0;
ve1 += vl1;
ve2 += vl2;
ve3 += vl3;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
const float vt2 = vn2 * vln2 + vz2;
const float vs2 = uint32_as_float(ve2);
const float vt3 = vn3 * vln2 + vz3;
const float vs3 = uint32_as_float(ve3);
float vp0 = vc4 * vt0 + vc3;
float vp1 = vc4 * vt1 + vc3;
float vp2 = vc4 * vt2 + vc3;
float vp3 = vc4 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
vp2 = vp2 * vt2 + vminus_two;
vp3 = vp3 * vt3 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vemo2 = vp2 * vts2 + vsmo2;
const float vemo3 = vp3 * vts3 + vsmo3;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-p6h5ts-div-x1.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_p6h5ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
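  // LUT-free variant: no table lookup, so the power-of-two scale vs comes
  // straight from the exponent bits (vb << 23) and the fractional part is
  // absorbed into a degree-6 polynomial (c6..c2 with the -2 constant folded
  // in). The emo/(emo + 2) reconstruction and copysignf fix-up match the
  // lut8 kernels.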
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-p6h5ts-div-x2.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_p6h5ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
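  // Same p6h5 scheme as the x1 kernel, unrolled 2 elements wide; a scalar
  // pass handles the single leftover element.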
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = __builtin_wasm_min_f32(vz0, vsat_cutoff);
vz1 = __builtin_wasm_min_f32(vz1, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasm-expm1minus-rr1-p6h5ts-div-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__wasm_expm1minus_rr1_p6h5ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
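  // Same p6h5 scheme, unrolled 4 elements wide; leftovers drain through a
  // scalar do-while loop.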
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = __builtin_wasm_min_f32(vz0, vsat_cutoff);
vz1 = __builtin_wasm_min_f32(vz1, vsat_cutoff);
vz2 = __builtin_wasm_min_f32(vz2, vsat_cutoff);
vz3 = __builtin_wasm_min_f32(vz3, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const uint32_t ve2 = vb2 << 23;
const uint32_t ve3 = vb3 << 23;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
const float vt2 = vn2 * vln2 + vz2;
const float vs2 = uint32_as_float(ve2);
const float vt3 = vn3 * vln2 + vz3;
const float vs3 = uint32_as_float(ve3);
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
vp2 = vp2 * vt2 + vminus_two;
vp3 = vp3 * vt3 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vemo2 = vp2 * vts2 + vsmo2;
const float vemo3 = vp3 * vts3 + vsmo3;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = __builtin_wasm_min_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-min-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_min_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
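  // SIMD form of the lut8_p4h3 scheme, 12 elements (3 vectors) per main-loop
  // iteration. WAsm SIMD has no gather, so each vector's four table lookups
  // extract two 64-bit lanes of byte offsets and use scalar
  // load32_zero/load32_lane loads. wasm_v128_bitselect(vx, vy, vsign_mask)
  // splices the sign bit of x onto the magnitude of vy = emo/(emo + 2),
  // yielding tanh(x).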
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_min(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-min-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_min_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
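  // Same SIMD lut8_p4h3 scheme, widened to 16 elements (4 vectors) per
  // main-loop iteration.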
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_min(vz89AB, vsat_cutoff);
vzCDEF = wasm_f32x4_min(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 20);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
const uint64_t vidxCD = wasm_u64x2_extract_lane(vidxCDEF, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxCD));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxCD >> 32)), vlCDEF, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
const uint64_t vidxEF = wasm_u64x2_extract_lane(vidxCDEF, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxEF), vlCDEF, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxEF >> 32)), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc4, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
const v128_t vepoCDEF = wasm_f32x4_sub(vemoCDEF, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
vyCDEF = wasm_v128_bitselect(vxCDEF, vyCDEF, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-min-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_min_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
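  // Same SIMD lut8_p4h3 scheme at minimum width: one 4-element vector per
  // iteration; the tail stores the last 1-3 elements with partial-lane
  // stores.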
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
// XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-min-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_min_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
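  // Same SIMD lut8_p4h3 scheme, 8 elements (2 vectors) per main-loop
  // iteration.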
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
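  // Final 1-3 elements: the kernel may over-read a full vector (XNN_OOB_READS)
  // but stores only the live lanes below.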
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 9,481 | avg_line_length: 43.516432 | max_line_length: 132 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-pmin-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_pmin_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
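  // Algorithm outline: tanh(x) is evaluated through expm1 on the negated
  // doubled magnitude, tanh(x) = sign(x) * (-expm1(-2|x|)) / (expm1(-2|x|) + 2),
  // with exp(-2|x|) split into a power of two from an 8-entry lookup table and
  // a short polynomial correction (the "lut8_p4h3" scheme). A minimal scalar
  // model of the identity, a hypothetical helper that is not part of XNNPACK
  // and substitutes libm's expm1f for the LUT + polynomial (requires <math.h>):
  //
  //   static float tanh_via_expm1minus(float x) {
  //     const float emo = expm1f(-2.0f * fabsf(x));  // exp(-2|x|) - 1
  //     const float y = emo / (emo + 2.0f);          // == -tanh(|x|)
  //     return copysignf(-y, x);                     // reattach the sign of x
  //   }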
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
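    // Saturate: beyond this cutoff tanh(|x|) rounds to 1.0f in fp32, so |x| is
    // clamped and the rest of the pipeline cannot go out of range.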
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_pmin(vz89AB, vsat_cutoff);
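    // n := z * (-log2(e), scaled for the exp(-2z) decomposition), rounded to a
    // multiple of 1/8 via the magic-bias trick; the fixed-point result sits in
    // the low mantissa bits of vn.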
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
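    // Split n: the integer part is shifted up into the fp32 exponent field
    // (ve), and the 3 fractional bits select one of the 8 table entries of
    // 2**(-k/8), scaled by 4 into a byte offset (vidx).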
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
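    // Reconstruct s = 2**n by adding the exponent bits to the gathered table
    // entries, and subtract the magic bias to recover n as a float.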
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
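    // t is the reduced argument left over after taking out 2**n; p(t) is a
    // degree-3 Horner polynomial approximating expm1(-2t)/t, whose constant
    // term is the -2 loaded as vminus_two.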
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
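    // expm1(-2z) = s*t*p(t) + (s - 1), computed as vemo below; adding 2,
    // i.e. subtracting vminus_two, gives the denominator exp(-2z) + 1.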
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
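    // vy = -tanh(|x|) <= 0; the bitselect takes the sign bit from x and the
    // magnitude from vy, producing tanh(x).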
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 11,333 | avg_line_length: 46.621849 | max_line_length: 132 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-pmin-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_pmin_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
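  // Same lut8_p4h3 expm1minus pipeline as the x12 kernel above, unrolled to
  // 16 elements per main-loop iteration; see the step comments there.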
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_pmin(vz89AB, vsat_cutoff);
vzCDEF = wasm_f32x4_pmin(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 20);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
const uint64_t vidxCD = wasm_u64x2_extract_lane(vidxCDEF, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxCD));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxCD >> 32)), vlCDEF, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
const uint64_t vidxEF = wasm_u64x2_extract_lane(vidxCDEF, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxEF), vlCDEF, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxEF >> 32)), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc4, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
const v128_t vepoCDEF = wasm_f32x4_sub(vemoCDEF, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
vyCDEF = wasm_v128_bitselect(vxCDEF, vyCDEF, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 13,177 | avg_line_length: 49.106464 | max_line_length: 132 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-pmin-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_pmin_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
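  // Same lut8_p4h3 expm1minus pipeline as the x12 kernel above, with no extra
  // unrolling: the main loop handles exactly one 4-lane vector per iteration.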
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 5,686 | avg_line_length: 39.913669 | max_line_length: 125 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-abs-pmin-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_abs_pmin_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
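  // Same lut8_p4h3 expm1minus pipeline as the x12 kernel above, unrolled to
  // 8 elements per main-loop iteration; see the step comments there.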
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.index_mask);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_abs.sign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 9,486 | avg_line_length: 43.539906 | max_line_length: 132 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-max-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_max_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
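  // Negated-abs ("nabs") variant of the lut8_p4h3 expm1minus pipeline: z is
  // formed as -|x| by OR-ing in the sign bit, clamped from below against a
  // negative sat_cutoff with f32x4_max, and the polynomial carries +2 instead
  // of -2. vinvsignx remembers which lanes had their sign flipped so an XOR
  // can restore it at the end.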
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_max(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
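    // vinvsignx has the sign bit set exactly in the lanes where x was
    // non-negative; XOR-ing it into vy turns -tanh(|x|) into tanh(x).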
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| file_length: 11,561 | avg_line_length: 46.191837 | max_line_length: 132 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-max-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_max_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
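  // Same negated-abs lut8_p4h3 pipeline as the x12 kernel above, unrolled to
  // 16 elements per main-loop iteration.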
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
v128_t vzCDEF = wasm_v128_or(vxCDEF, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_max(vz89AB, vsat_cutoff);
const v128_t vinvsignxCDEF = wasm_v128_xor(vxCDEF, vzCDEF);
vzCDEF = wasm_f32x4_max(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 20);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
const uint64_t vidxCD = wasm_u64x2_extract_lane(vidxCDEF, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxCD));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxCD >> 32)), vlCDEF, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
const uint64_t vidxEF = wasm_u64x2_extract_lane(vidxCDEF, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxEF), vlCDEF, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxEF >> 32)), vlCDEF, 3);
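    // Integer-adding the exponent bits to the table entries effectively scales them by the
    // power of two encoded in n, completing the scale factor s for the rounded argument.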
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2), vzCDEF);
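    // t = n*(-ln2) + z is the residual of the range reduction, small enough for a short
    // polynomial approximation.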
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc4, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
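    // Reassemble the exponential: emo = p(t)*(t*s) + (s - 1), where p was evaluated above by
    // Horner's rule with a trailing +2 term; epo = emo + 2 is the denominator of
    // tanh(z) = expm1(2z) / (expm1(2z) + 2).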
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
const v128_t vepoCDEF = wasm_f32x4_add(vemoCDEF, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
vyCDEF = wasm_v128_xor(vyCDEF, vinvsignxCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
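  // Remainder of 1-3 elements: XNN_OOB_READS permits loading a full vector past the end of
  // the input; only the valid lanes are stored below.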
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-max-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_max_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
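  // x4 variant: one vector per loop iteration; the arithmetic matches the unrolled kernels
  // above.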
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-max-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_max_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
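  // x8 variant: the main loop is unrolled to two vectors per iteration, with a 4-wide loop
  // and a lane-wise tail for the remainder.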
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-pmax-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_pmax_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
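  // pmax variant: wasm_f32x4_pmax(a, b) computes b < a ? a : b, which typically lowers to a
  // single native instruction, unlike the NaN-propagating f32x4.max used by the kernels
  // above; the two differ only for NaN inputs here.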
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_pmax(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-pmax-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_pmax_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
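  // x16 pmax variant: four vectors per main-loop iteration; see the pmax note above.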
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
v128_t vzCDEF = wasm_v128_or(vxCDEF, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_pmax(vz89AB, vsat_cutoff);
const v128_t vinvsignxCDEF = wasm_v128_xor(vxCDEF, vzCDEF);
vzCDEF = wasm_f32x4_pmax(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 20);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 20);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
const uint64_t vidx89 = wasm_u64x2_extract_lane(vidx89AB, 0);
const uint64_t vidxCD = wasm_u64x2_extract_lane(vidxCDEF, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx89));
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxCD));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx89 >> 32)), vl89AB, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxCD >> 32)), vlCDEF, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
const uint64_t vidxAB = wasm_u64x2_extract_lane(vidx89AB, 1);
const uint64_t vidxEF = wasm_u64x2_extract_lane(vidxCDEF, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxAB), vl89AB, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidxEF), vlCDEF, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxAB >> 32)), vl89AB, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidxEF >> 32)), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt89AB), vc3);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc4, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
const v128_t vepoCDEF = wasm_f32x4_add(vemoCDEF, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
vyCDEF = wasm_v128_xor(vyCDEF, vinvsignxCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-pmax-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_pmax_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
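  // x4 pmax variant: no unrolling; otherwise identical to the kernels above.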
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-lut8-p4h3ts-div-nabs-pmax-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_lut8_p4h3ts_div_nabs_pmax_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.magic_bias);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.index_mask);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.minus_ln2);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_lut8_p4h3_nabs.one);
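  // x8 pmax variant: two vectors per main-loop iteration.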
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 20);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 20);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const uint64_t vidx01 = wasm_u64x2_extract_lane(vidx0123, 0);
const uint64_t vidx45 = wasm_u64x2_extract_lane(vidx4567, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx01));
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx45));
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx01 >> 32)), vl0123, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx45 >> 32)), vl4567, 1);
const uint64_t vidx23 = wasm_u64x2_extract_lane(vidx0123, 1);
const uint64_t vidx67 = wasm_u64x2_extract_lane(vidx4567, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx23), vl0123, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx67), vl4567, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx23 >> 32)), vl0123, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx67 >> 32)), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt0123), vc3);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
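  // Tail of 1-3 elements: the kernel is declared XNN_OOB_READS, so a full
  // 4-lane vector may be loaded past the end of the input; the result is
  // computed as above and only the valid low lanes are written back with
  // 64-bit and 32-bit lane stores.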
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 20);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,669 | 43.155251 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-min-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_min_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
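  // This abs variant works on z = |x|, clamped from above against the
  // positive sat_cutoff, and computes y = expm1(-2z) / (expm1(-2z) + 2)
  // = -tanh(z); wasm_v128_bitselect then combines the sign bit of the
  // original x with the magnitude of y, which is exactly tanh(x).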
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_min(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 8,048 | 40.066327 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-min-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_min_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
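  // Magic-bias rounding: adding the large vmagic_bias to z * vminus_log2e
  // leaves round-to-nearest(z * vminus_log2e) in the low mantissa bits of vn,
  // so shifting vn left by 23 plants that integer n in the exponent field
  // (vs = 2**n as float bits), and subtracting the bias back recovers n as a
  // float for forming the reduced argument t = n * ln2 + z.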
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_min(vz89AB, vsat_cutoff);
vzCDEF = wasm_f32x4_min(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
const v128_t vepoCDEF = wasm_f32x4_sub(vemoCDEF, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
vyCDEF = wasm_v128_bitselect(vxCDEF, vyCDEF, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,238 | 41.972093 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-min-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_min_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
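  // The polynomial refinement p(t) = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t
  // + (-2) is evaluated by Horner's scheme below, one multiply-add-shaped
  // step per coefficient; multiplied by t*s it contributes the degree-6 part
  // of expm1(-2z).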
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 4,370 | 34.827869 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-min-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_min_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
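  // vminus_two does double duty in this kernel: it terminates the Horner
  // chain (vp = vp * vt + (-2)) and later supplies the denominator,
  // vepo = vemo - (-2) = expm1(-2z) + 2, avoiding a separate +2 constant.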
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
vz0123 = wasm_f32x4_min(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_min(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_min(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 6,855 | 37.734463 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-pmin-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
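  // wasm_f32x4_pmin(a, b) is the pseudo-minimum b < a ? b : a; unlike
  // wasm_f32x4_min it does not propagate NaN or order -0 below +0, which lets
  // engines lower it to a single min instruction on x86, and it is sufficient
  // here because z = |x| only needs an upper clamp at sat_cutoff.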
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_pmin(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 8,054 | 40.096939 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-pmin-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
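  // vsign_mask selects only bit 31 of each lane, so
  // wasm_v128_bitselect(vx, vy, vsign_mask) takes the sign from the original
  // x and the magnitude from y = -tanh(|x|), yielding tanh(x) without an
  // explicit negation.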
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
vz89AB = wasm_f32x4_pmin(vz89AB, vsat_cutoff);
vzCDEF = wasm_f32x4_pmin(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vminus_two);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
const v128_t vepo89AB = wasm_f32x4_sub(vemo89AB, vminus_two);
const v128_t vepoCDEF = wasm_f32x4_sub(vemoCDEF, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
vy89AB = wasm_v128_bitselect(vx89AB, vy89AB, vsign_mask);
vyCDEF = wasm_v128_bitselect(vxCDEF, vyCDEF, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,245 | 42.004651 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-pmin-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
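// Illustrative direct invocation (a sketch: the buffers are made up and the
// init-helper name is assumed from XNNPACK's parameter-struct naming; in
// normal use these ukernels are reached through the operator API):
//
//   float in[4] = { -2.0f, -0.5f, 0.5f, 2.0f };
//   float out[4];
//   union xnn_f32_tanh_params params;
//   xnn_init_f32_tanh_wasmsimd_expm1minus_rr1_p6h5_abs_params(&params);
//   xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin_x4(
//       sizeof(in), in, out, &params);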
| 4,373 | 34.852459 | 107 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-pmin-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sat_cutoff);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.magic_bias);
const v128_t vln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.c2);
const v128_t vminus_two = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.minus_two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.one);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_abs.sign_mask);
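  // Batch traversal below: an 8-element main loop carrying two independent
  // 4-lane dependency chains, then a 4-element loop, then a partially stored
  // tail vector covering the last 1-3 floats.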
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_f32x4_abs(vx0123);
v128_t vz4567 = wasm_f32x4_abs(vx4567);
vz0123 = wasm_f32x4_pmin(vz0123, vsat_cutoff);
vz4567 = wasm_f32x4_pmin(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vminus_two);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vminus_two);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_sub(vemo0123, vminus_two);
const v128_t vepo4567 = wasm_f32x4_sub(vemo4567, vminus_two);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_bitselect(vx0123, vy0123, vsign_mask);
vy4567 = wasm_v128_bitselect(vx4567, vy4567, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_f32x4_abs(vx);
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-max-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_max_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
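  // Per-iteration outline (nabs variant): z = -|x| is formed by OR-ing the
  // sign bit into x, and vinvsignx = x ^ z records whether the sign has to be
  // flipped back at the end. z is clamped from below at sat_cutoff, n = z*log2e
  // is rounded via the magic-bias trick, s = 2**n comes from shifting n's low
  // mantissa bits into the exponent field, and t = z + n*minus_ln2 is the
  // reduced argument. emo = p(t)*(t*s) + (s - 1) reconstructs expm1(2z),
  // tanh(z) is emo/(emo + 2), and the saved sign is restored with an XOR.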
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_max(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-max-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_max_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
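  // Same nabs expm1minus computation as the x12 variant above, unrolled to
  // process 16 elements per main-loop iteration.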
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
v128_t vzCDEF = wasm_v128_or(vxCDEF, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_max(vz89AB, vsat_cutoff);
const v128_t vinvsignxCDEF = wasm_v128_xor(vxCDEF, vzCDEF);
vzCDEF = wasm_f32x4_max(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
const v128_t vepoCDEF = wasm_f32x4_add(vemoCDEF, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
vyCDEF = wasm_v128_xor(vyCDEF, vinvsignxCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-max-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_max_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
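  // Same nabs expm1minus computation as the unrolled variants above, with the
  // main loop processing a single vector of 4 elements per iteration.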
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-max-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_max_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
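  // Same nabs expm1minus computation as above, unrolled to 8 elements per
  // main-loop iteration.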
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_max(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_max(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_max(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-pmax-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_pmax_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
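  // This variant clamps with wasm_f32x4_pmax (pseudo-maximum, defined by the
  // WASM SIMD spec as b < a ? a : b, which maps to a single x86 MAXPS) instead
  // of the fully IEEE-style wasm_f32x4_max used by the max variants; the two
  // differ only in NaN and signed-zero handling.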
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_pmax(vz89AB, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-pmax-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_pmax_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
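  // Same nabs/pmax expm1minus computation as the x12 variant above, unrolled
  // to 16 elements per main-loop iteration.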
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
v128_t vz89AB = wasm_v128_or(vx89AB, vsign_mask);
v128_t vzCDEF = wasm_v128_or(vxCDEF, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
const v128_t vinvsignx89AB = wasm_v128_xor(vx89AB, vz89AB);
vz89AB = wasm_f32x4_pmax(vz89AB, vsat_cutoff);
const v128_t vinvsignxCDEF = wasm_v128_xor(vxCDEF, vzCDEF);
vzCDEF = wasm_f32x4_pmax(vzCDEF, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
const v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2), vz89AB);
const v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2), vzCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vtwo);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vts89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t vsmo89AB = wasm_f32x4_sub(vs89AB, vone);
const v128_t vtsCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t vsmoCDEF = wasm_f32x4_sub(vsCDEF, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vemo89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vts89AB), vsmo89AB);
const v128_t vemoCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtsCDEF), vsmoCDEF);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
const v128_t vepo89AB = wasm_f32x4_add(vemo89AB, vtwo);
const v128_t vepoCDEF = wasm_f32x4_add(vemoCDEF, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
v128_t vy89AB = wasm_f32x4_div(vemo89AB, vepo89AB);
v128_t vyCDEF = wasm_f32x4_div(vemoCDEF, vepoCDEF);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
vy89AB = wasm_v128_xor(vy89AB, vinvsignx89AB);
vyCDEF = wasm_v128_xor(vyCDEF, vinvsignxCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-pmax-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_pmax_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
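  // Same nabs/pmax expm1minus computation, one 4-element vector per iteration.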
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-pmax-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_pmax_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sign_mask);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.sat_cutoff);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.magic_bias);
const v128_t vminus_ln2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.minus_ln2);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.c2);
const v128_t vtwo = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.two);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_expm1minus_rr1_p6h5_nabs.one);
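  // Same nabs/pmax expm1minus computation, unrolled to 8 elements per
  // main-loop iteration.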
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vz0123 = wasm_v128_or(vx0123, vsign_mask);
v128_t vz4567 = wasm_v128_or(vx4567, vsign_mask);
const v128_t vinvsignx0123 = wasm_v128_xor(vx0123, vz0123);
vz0123 = wasm_f32x4_pmax(vz0123, vsat_cutoff);
const v128_t vinvsignx4567 = wasm_v128_xor(vx4567, vz4567);
vz4567 = wasm_f32x4_pmax(vz4567, vsat_cutoff);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2), vz0123);
const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2), vz4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vtwo);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vtwo);
const v128_t vts0123 = wasm_f32x4_mul(vt0123, vs0123);
const v128_t vsmo0123 = wasm_f32x4_sub(vs0123, vone);
const v128_t vts4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t vsmo4567 = wasm_f32x4_sub(vs4567, vone);
const v128_t vemo0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vts0123), vsmo0123);
const v128_t vemo4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vts4567), vsmo4567);
const v128_t vepo0123 = wasm_f32x4_add(vemo0123, vtwo);
const v128_t vepo4567 = wasm_f32x4_add(vemo4567, vtwo);
v128_t vy0123 = wasm_f32x4_div(vemo0123, vepo0123);
v128_t vy4567 = wasm_f32x4_div(vemo4567, vepo4567);
vy0123 = wasm_v128_xor(vy0123, vinvsignx0123);
vy4567 = wasm_v128_xor(vy4567, vinvsignx4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vz = wasm_v128_or(vx, vsign_mask);
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
vz = wasm_f32x4_pmax(vz, vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
const v128_t vts = wasm_f32x4_mul(vt, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
vy = wasm_v128_xor(vy, vinvsignx);
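    // Write the remaining elements one 64-bit, then one 32-bit, lane at a time.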
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 7,044
| 37.497268
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vnonsign_mask = _mm256_load_ps(params->avx.nonsign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vy01234567 = _mm256_and_ps(vx01234567, vnonsign_mask);
const __m256 vy89ABCDEF = _mm256_and_ps(vx89ABCDEF, vnonsign_mask);
_mm256_storeu_ps(output, vy01234567);
_mm256_storeu_ps(output + 8, vy89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vy = _mm256_and_ps(vx, vnonsign_mask);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
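    // Load the 1-7 remaining elements with a mask so that no bytes past the end
    // of the input are touched.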
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_and_ps(vx, vnonsign_mask);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 2,194
| 29.068493
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vnonsign_mask = _mm256_load_ps(params->avx.nonsign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
input += 8;
const __m256 vy01234567 = _mm256_and_ps(vx01234567, vnonsign_mask);
_mm256_storeu_ps(output, vy01234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_and_ps(vx, vnonsign_mask);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 1,773
| 27.15873
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vnonsign_mask = _mm512_set1_epi32((int) params->avx512.nonsign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512i vx0123456789ABCDEF = _mm512_loadu_si512(input);
input += 16;
const __m512i vy0123456789ABCDEF = _mm512_and_epi32(vx0123456789ABCDEF, vnonsign_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512i vx = _mm512_maskz_loadu_epi32(vmask, input);
const __m512i vy = _mm512_and_epi32(vx, vnonsign_mask);
_mm512_mask_storeu_epi32(output, vmask, vy);
}
}
| 1,616
| 30.096154
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vnonsign_mask = _mm512_set1_epi32((int) params->avx512.nonsign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512i vx0123456789ABCDEF = _mm512_loadu_si512(input);
const __m512i vxGHIJKLMNOPQRSTUV = _mm512_loadu_si512(input + 16);
input += 32;
const __m512i vy0123456789ABCDEF = _mm512_and_epi32(vx0123456789ABCDEF, vnonsign_mask);
const __m512i vyGHIJKLMNOPQRSTUV = _mm512_and_epi32(vxGHIJKLMNOPQRSTUV, vnonsign_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm512_storeu_si512(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512i vx = _mm512_loadu_si512(input);
input += 16;
const __m512i vy = _mm512_and_epi32(vx, vnonsign_mask);
_mm512_storeu_si512(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512i vx = _mm512_maskz_loadu_epi32(vmask, input);
const __m512i vy = _mm512_and_epi32(vx, vnonsign_mask);
_mm512_mask_storeu_epi32(output, vmask, vy);
}
}
| 2,094
| 31.734375
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vabsq_f32(vx0123);
vst1q_f32(output, vy0123); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
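    // Tail: the full-vector load may read past the last element (the kernel is
    // tagged XNN_OOB_READS); only the valid lanes are stored.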
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vabsq_f32(vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 1,274
| 25.020408
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vabsq_f32(vx0123);
const float32x4_t vy4567 = vabsq_f32(vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vy = vabsq_f32(vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vabsq_f32(vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 1,639
| 27.77193
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-rvv-x1v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__rvv_x1v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
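  // RVV kernels count elements rather than bytes; vsetvl then clamps each
  // iteration to at most one LMUL=1 register group of 32-bit lanes.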
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m1(batch);
const vfloat32m1_t vi = __riscv_vle32_v_f32m1(input, n);
input += n;
const vfloat32m1_t vo = __riscv_vfabs_v_f32m1(vi, n);
__riscv_vse32_v_f32m1(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-rvv-x2v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__rvv_x2v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m2(batch);
const vfloat32m2_t vi = __riscv_vle32_v_f32m2(input, n);
input += n;
const vfloat32m2_t vo = __riscv_vfabs_v_f32m2(vi, n);
__riscv_vse32_v_f32m2(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-rvv-x4v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__rvv_x4v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m4(batch);
const vfloat32m4_t vi = __riscv_vle32_v_f32m4(input, n);
input += n;
const vfloat32m4_t vo = __riscv_vfabs_v_f32m4(vi, n);
__riscv_vse32_v_f32m4(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-rvv-x8v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__rvv_x8v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m8(batch);
const vfloat32m8_t vi = __riscv_vle32_v_f32m8(input, n);
input += n;
const vfloat32m8_t vo = __riscv_vfabs_v_f32m8(vi, n);
__riscv_vse32_v_f32m8(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float vx = *input++;
const float vy = fabsf(vx);
*output++ = vy;
}
}
| 832
| 22.8
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vy0 = fabsf(vx0);
const float vy1 = fabsf(vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
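    // batch is a multiple of sizeof(float), so exactly one element remains here.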
const float vx = *input;
const float vy = fabsf(vx);
*output = vy;
}
}
| 1,081
| 22.021277
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vy0 = fabsf(vx0);
const float vy1 = fabsf(vx1);
const float vy2 = fabsf(vx2);
const float vy3 = fabsf(vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vy = fabsf(vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,330
| 22.767857
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps(params->sse.nonsign_mask);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
const __m128 vy0123 = _mm_and_ps(vx0123, vnonsign_mask);
_mm_storeu_ps(output, vy0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_and_ps(vx, vnonsign_mask);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,362
| 24.716981
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps(params->sse.nonsign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vy0123 = _mm_and_ps(vx0123, vnonsign_mask);
const __m128 vy4567 = _mm_and_ps(vx4567, vnonsign_mask);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vy = _mm_and_ps(vx, vnonsign_mask);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_and_ps(vx, vnonsign_mask);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,744
| 26.698413
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__wasmsimd_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const v128_t vnonsign_mask = wasm_v128_load64_splat(&params->wasmsimd.nonsign_mask);
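  // |x| is computed by clearing the sign bit: y = x & 0x7FFFFFFF in each lane.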
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vy = wasm_v128_and(vx, vnonsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_v128_and(vx, vnonsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,415
| 25.716981
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vabs-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vabs_ukernel__wasmsimd_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const v128_t vnonsign_mask = wasm_v128_load64_splat(&params->wasmsimd.nonsign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vy0123 = wasm_v128_and(vx0123, vnonsign_mask);
const v128_t vy4567 = wasm_v128_and(vx4567, vnonsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vy = wasm_v128_and(vx, vnonsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_v128_and(vx, vnonsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,829
| 27.153846
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const __m256 vsign_mask = _mm256_load_ps(params->avx.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vy01234567 = _mm256_xor_ps(vx01234567, vsign_mask);
const __m256 vy89ABCDEF = _mm256_xor_ps(vx89ABCDEF, vsign_mask);
_mm256_storeu_ps(output, vy01234567);
_mm256_storeu_ps(output + 8, vy89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vy = _mm256_xor_ps(vx, vsign_mask);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_xor_ps(vx, vsign_mask);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 2,176
| 28.821918
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const __m256 vsign_mask = _mm256_load_ps(params->avx.sign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
input += 8;
const __m256 vy01234567 = _mm256_xor_ps(vx01234567, vsign_mask);
_mm256_storeu_ps(output, vy01234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_xor_ps(vx, vsign_mask);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 1,761
| 26.968254
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512i vx0123456789ABCDEF = _mm512_loadu_si512(input);
input += 16;
const __m512i vy0123456789ABCDEF = _mm512_xor_epi32(vx0123456789ABCDEF, vsign_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512i vx = _mm512_maskz_loadu_epi32(vmask, input);
const __m512i vy = _mm512_xor_epi32(vx, vsign_mask);
_mm512_mask_storeu_epi32(output, vmask, vy);
}
}
| 1,604
| 29.865385
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512i vx0123456789ABCDEF = _mm512_loadu_si512(input);
const __m512i vxGHIJKLMNOPQRSTUV = _mm512_loadu_si512(input + 16);
input += 32;
const __m512i vy0123456789ABCDEF = _mm512_xor_epi32(vx0123456789ABCDEF, vsign_mask);
const __m512i vyGHIJKLMNOPQRSTUV = _mm512_xor_epi32(vxGHIJKLMNOPQRSTUV, vsign_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm512_storeu_si512(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512i vx = _mm512_loadu_si512(input);
input += 16;
const __m512i vy = _mm512_xor_epi32(vx, vsign_mask);
_mm512_storeu_si512(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512i vx = _mm512_maskz_loadu_epi32(vmask, input);
const __m512i vy = _mm512_xor_epi32(vx, vsign_mask);
_mm512_mask_storeu_epi32(output, vmask, vy);
}
}
| 2,076
| 31.453125
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vnegq_f32(vx0123);
vst1q_f32(output, vy0123); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vnegq_f32(vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 1,274
| 25.020408
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vnegq_f32(vx0123);
const float32x4_t vy4567 = vnegq_f32(vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vy = vnegq_f32(vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vnegq_f32(vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 1,639
| 27.77193
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-rvv-x1v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__rvv_x1v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m1(batch);
const vfloat32m1_t vi = __riscv_vle32_v_f32m1(input, n);
input += n;
const vfloat32m1_t vo = __riscv_vfneg_v_f32m1(vi, n);
__riscv_vse32_v_f32m1(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-rvv-x2v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__rvv_x2v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m2(batch);
const vfloat32m2_t vi = __riscv_vle32_v_f32m2(input, n);
input += n;
const vfloat32m2_t vo = __riscv_vfneg_v_f32m2(vi, n);
__riscv_vse32_v_f32m2(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-rvv-x4v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__rvv_x4v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m4(batch);
const vfloat32m4_t vi = __riscv_vle32_v_f32m4(input, n);
input += n;
const vfloat32m4_t vo = __riscv_vfneg_v_f32m4(vi, n);
__riscv_vse32_v_f32m4(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-rvv-x8v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__rvv_x8v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m8(batch);
const vfloat32m8_t vi = __riscv_vle32_v_f32m8(input, n);
input += n;
const vfloat32m8_t vo = __riscv_vfneg_v_f32m8(vi, n);
__riscv_vse32_v_f32m8(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 962
| 22.487805
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float vx = *input++;
const float vy = -vx;
*output++ = vy;
}
}
| 808
| 22.794118
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vy0 = -vx0;
const float vy1 = -vx1;
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vy = -vx;
*output = vy;
}
}
| 1,045
| 21.73913
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vy0 = -vx0;
const float vy1 = -vx1;
const float vy2 = -vx2;
const float vy3 = -vx3;
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vy = -vx;
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,282
| 22.327273
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse.sign_mask);
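  // Negation flips only the sign bit: y = x ^ 0x80000000 in each lane.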
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
const __m128 vy0123 = _mm_xor_ps(vx0123, vsign_mask);
_mm_storeu_ps(output, vy0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_xor_ps(vx, vsign_mask);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,350
| 24.490566
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse.sign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vy0123 = _mm_xor_ps(vx0123, vsign_mask);
const __m128 vy4567 = _mm_xor_ps(vx4567, vsign_mask);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vy = _mm_xor_ps(vx, vsign_mask);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_xor_ps(vx, vsign_mask);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,726
| 26.412698
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__wasmsimd_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const v128_t vsign_mask = wasm_v128_load64_splat(&params->wasmsimd.sign_mask);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vy = wasm_v128_xor(vx, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_v128_xor(vx, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,403
| 25.490566
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vneg-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vneg_ukernel__wasmsimd_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const v128_t vsign_mask = wasm_v128_load64_splat(&params->wasmsimd.sign_mask);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vy0123 = wasm_v128_xor(vx0123, vsign_mask);
const v128_t vy4567 = wasm_v128_xor(vx4567, vsign_mask);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vy = wasm_v128_xor(vx, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_v128_xor(vx, vsign_mask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,811
| 26.876923
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vy01234567 = _mm256_mul_ps(vx01234567, vx01234567);
const __m256 vy89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vx89ABCDEF);
_mm256_storeu_ps(output, vy01234567);
_mm256_storeu_ps(output + 8, vy89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vy = _mm256_mul_ps(vx, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_mul_ps(vx, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 2,097
| 28.138889
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
input += 8;
const __m256 vy01234567 = _mm256_mul_ps(vx01234567, vx01234567);
_mm256_storeu_ps(output, vy01234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vy = _mm256_mul_ps(vx, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 1,690
| 26.274194
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx0123456789ABCDEF = _mm512_loadu_ps(input);
input += 16;
const __m512 vy0123456789ABCDEF = _mm512_mul_ps(vx0123456789ABCDEF, vx0123456789ABCDEF);
_mm512_storeu_ps(output, vy0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vy = _mm512_mul_ps(vx, vx);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 1,506
| 28.54902
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0123456789ABCDEF = _mm512_loadu_ps(input);
const __m512 vxGHIJKLMNOPQRSTUV = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vy0123456789ABCDEF = _mm512_mul_ps(vx0123456789ABCDEF, vx0123456789ABCDEF);
const __m512 vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vxGHIJKLMNOPQRSTUV, vxGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vy0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vy = _mm512_mul_ps(vx, vx);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vy = _mm512_mul_ps(vx, vx);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 1,956
| 30.063492
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vmulq_f32(vx0123, vx0123);
vst1q_f32(output, vy0123); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vmulq_f32(vx, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 1,290
| 25.346939
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vy0123 = vmulq_f32(vx0123, vx0123);
const float32x4_t vy4567 = vmulq_f32(vx4567, vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vy = vmulq_f32(vx, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vy = vmulq_f32(vx, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
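// Hedged note, not generated code: relative to the x4 variant, this kernel
// unrolls the main loop 2x so each iteration carries two independent
// multiplies, then drains with a 4-wide loop before the same lane-wise tail.
// The unroll factor is a throughput/code-size trade-off chosen by the
// generator, not a correctness requirement.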
| 1,667
| 28.263158
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-rvv-x1v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__rvv_x1v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m1(batch);
const vfloat32m1_t vi = __riscv_vle32_v_f32m1(input, n);
input += n;
const vfloat32m1_t vo = __riscv_vfmul_vv_f32m1(vi, vi, n);
__riscv_vse32_v_f32m1(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
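// Hedged note, not generated code: __riscv_vsetvl_e32m1(batch) caps the
// requested element count at what one pass can process (at most VLMAX for
// 32-bit elements at LMUL=1), and every vle32/vfmul/vse32 then operates on
// exactly `n` elements. The final, possibly short, iteration reuses the same
// code path, so no separate tail loop or mask is needed, unlike the
// fixed-width SSE and AVX-512 kernels above.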
| 971
| 22.707317
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-rvv-x2v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__rvv_x2v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m2(batch);
const vfloat32m2_t vi = __riscv_vle32_v_f32m2(input, n);
input += n;
const vfloat32m2_t vo = __riscv_vfmul_vv_f32m2(vi, vi, n);
__riscv_vse32_v_f32m2(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 971
| 22.707317
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-rvv-x4v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__rvv_x4v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m4(batch);
const vfloat32m4_t vi = __riscv_vle32_v_f32m4(input, n);
input += n;
const vfloat32m4_t vo = __riscv_vfmul_vv_f32m4(vi, vi, n);
__riscv_vse32_v_f32m4(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 971
| 22.707317
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-rvv-x8v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__rvv_x8v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m8(batch);
const vfloat32m8_t vi = __riscv_vle32_v_f32m8(input, n);
input += n;
const vfloat32m8_t vo = __riscv_vfmul_vv_f32m8(vi, vi, n);
__riscv_vse32_v_f32m8(output, vo, n);
output += n;
batch -= n;
} while (batch != 0);
}
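// Hedged note, not generated code: the x1v/x2v/x4v/x8v variants differ only
// in LMUL (m1, m2, m4, m8), the vector register-grouping factor. VLMAX
// scales with LMUL, so larger variants cover more elements per iteration at
// the cost of occupying more of the vector register file; the loop structure
// is otherwise identical.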
| 971
| 22.707317
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float vx = *input++;
const float vy = vx * vx;
*output++ = vy;
}
}
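// Hedged usage sketch, not generated code: as the `batch % sizeof(float)`
// assertions show, these ukernels take the batch size in bytes, not
// elements. A hypothetical caller squaring `n` floats with this kernel
// (the default params are unused here, but passing a real object keeps the
// XNN_MIN_ELEMENTS(1) contract satisfied):
//
//   union xnn_f32_default_params params;
//   xnn_f32_vsqr_ukernel__scalar_x1(n * sizeof(float), x, y, &params);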
| 816
| 23.029412
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vy0 = vx0 * vx0;
const float vy1 = vx1 * vx1;
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vy = vx * vx;
*output = vy;
}
}
| 1,063
| 22.130435
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vy0 = vx0 * vx0;
const float vy1 = vx1 * vx1;
const float vy2 = vx2 * vx2;
const float vy3 = vx3 * vx3;
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vy = vx * vx;
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
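// Hedged note, not generated code: with a 4-wide main loop the remainder can
// be 1-3 elements, hence the do-while tail here; the x2 variant above can
// leave at most one element, so a single if with no loop suffices there.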
| 1,310
| 22.836364
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
const __m128 vy0123 = _mm_mul_ps(vx0123, vx0123);
_mm_storeu_ps(output, vy0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_mul_ps(vx, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
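// Hedged note, not generated code: the tail above narrows the store without
// writing past `output`: _mm_storel_pi writes lanes 0-1, _mm_movehl_ps(vy, vy)
// moves lanes 2-3 down into lanes 0-1, and _mm_store_ss writes a single lane.
// As in the NEON kernels, the load side may over-read (XNN_OOB_READS), but
// the store side never over-writes.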
| 1,278
| 23.596154
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsqr_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vy0123 = _mm_mul_ps(vx0123, vx0123);
const __m128 vy4567 = _mm_mul_ps(vx4567, vx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vy = _mm_mul_ps(vx, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_mul_ps(vx, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,642
| 25.5
| 90
|
c
|