| repo (string, 1–152 chars, nullable ⌀) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
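// Saturation clamp: VRANGEPS (imm8 0xA = absolute-min select) limits the magnitude of x to
// sat_cutoff, past which tanh(x) is +/-1 to within float precision.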
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
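// Range reduction: vn = round(vz * minus_log2e) via the magic-bias trick; vs = 2^n is rebuilt by
// shifting vn's low mantissa bits into the exponent field; vt is the reduced argument.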
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
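// Evaluate the degree-6 polynomial approximation in Horner form (coefficients c6..c2, then the -2 term).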
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
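// Reconstruct the expm1-style numerator: vemo = p*(t*s) + (s - 1); the denominator is vepo = vemo + 2.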
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
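// Division via reciprocal: RCP14 gives ~14 bits of accuracy; one Newton-Raphson step
// (vrepo += vrepo * (1 - vrepo*vepo)) refines it to nearly full float precision.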
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
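// Restore the sign of the original input: VPTERNLOG with imm8 0xD8 computes (sign_mask ? vx : vy)
// bitwise, i.e. the sign bit comes from vx and all other bits from vy.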
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
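// Zero-masking load: only the first `batch` lanes are read from memory; masked-off lanes are zeroed.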
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 4,615 | avg_line_length: 36.836066 | max_line_length: 124 | extension_type: c |
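All seven rows in this dump are the same AVX512-SKX tanh microkernel generated at different unroll factors (16, 160, 32, 48, 64, 80 and 96 floats per main-loop iteration); only the number of ZMM registers processed per iteration changes. As a reading aid, below is a minimal scalar sketch of the math these kernels implement, assuming the standard identity tanh(x) = -expm1(-2|x|) / (expm1(-2|x|) + 2) with the sign of x restored at the end; the cutoff constant and function name are illustrative, not XNNPACK APIs.

```c
#include <math.h>

// Scalar reference sketch for the vectorized kernels above (illustrative only, not XNNPACK code).
static float tanh_expm1minus_ref(float x) {
  // Assumed saturation cutoff: past ~9.01, float tanh is +/-1 to the last bit anyway.
  const float sat_cutoff = 9.010913f;
  const float z = fminf(fabsf(x), sat_cutoff);
  const float emo = expm1f(-2.0f * z);   // e^(-2z) - 1, in [-1, 0]
  const float epo = emo + 2.0f;          // e^(-2z) + 1
  return copysignf(-emo / epo, x);       // |tanh(x)| with the sign of x restored
}
```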
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x160.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
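// Main loop: 160 elements (10 ZMM vectors) per iteration; smaller batches fall through to the
// 16-element loop and the masked tail below.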
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn8), 23));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn9), 23));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
__m512 vp8 = vc6;
__m512 vp9 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
__m512 vrepo9 = _mm512_rcp14_ps(vepo9);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
const __m512 verepo9 = _mm512_fnmadd_ps(vrepo9, vepo9, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm512_fmadd_ps(verepo9, vrepo9, vrepo9);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
__m512 vy9 = _mm512_mul_ps(vemo9, vrepo9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 16,842 | avg_line_length: 46.579096 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 7,143 | avg_line_length: 39.134831 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 8,354 | avg_line_length: 40.775 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 9,565 | avg_line_length: 42.09009 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 10,776 | avg_line_length: 43.168033 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
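    // Evaluate the polynomial factor in t by Horner's rule with FMAs: starting
    // from c6, each step folds in the next coefficient down to the trailing -2;
    // the later multiply by t*s completes the degree-6 ("p6") approximation.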
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
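    // vemo reconstructs the expm1(...) value from the kernel name ("expm1minus");
    // vepo is that value plus 2 (via subtracting minus_two), so the quotient
    // vemo / vepo below is tanh up to sign, which the ternary-logic step fixes.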
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
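    // "nr1" variant: instead of dividing, take the ~14-bit reciprocal estimate
    // from VRCP14PS and refine it with one Newton-Raphson step,
    // r <- r + r * (1 - r * vepo), before multiplying by vemo.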
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
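    // Load the final partial vector with a zero-masked load; the same mask is
    // reused for the masked store below, so no out-of-range elements are written.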
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 11,987
| 44.067669
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-lut8-p4h3ts-div-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_lut8_p4h3ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
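    // lut8 variant: the low 3 bits of the fixed-point n select an entry of
    // xnn_table_exp2minus_k_over_8 (presumably the float bits of 2**(-k/8));
    // shifting the remaining bits into the exponent field (<< 20) and adding the
    // table entry reassembles vs = 2**n without a full exponent computation.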
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc4, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,417
| 27.785714
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-lut8-p4h3ts-div-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_lut8_p4h3ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
float vn0 = fmaf(vz0, vminus_log2e, vmagic_bias);
float vn1 = fmaf(vz1, vminus_log2e, vmagic_bias);
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
ve0 += vl0;
ve1 += vl1;
const float vt0 = fmaf(vn0, vln2, vz0);
const float vs0 = uint32_as_float(ve0);
const float vt1 = fmaf(vn1, vln2, vz1);
const float vs1 = uint32_as_float(ve1);
float vp0 = fmaf(vc4, vt0, vc3);
float vp1 = fmaf(vc4, vt1, vc3);
vp0 = fmaf(vp0, vt0, vc2);
vp1 = fmaf(vp1, vt1, vc2);
vp0 = fmaf(vp0, vt0, vminus_two);
vp1 = fmaf(vp1, vt1, vminus_two);
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = fmaf(vp0, vts0, vsmo0);
const float vemo1 = fmaf(vp1, vts1, vsmo1);
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc4, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
| 4,150
| 27.627586
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-lut8-p4h3ts-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_lut8_p4h3ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
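  // Same per-element computation as the x1 kernel, manually unrolled by four so
  // that the independent fmaf() chains can overlap (presumably for better ILP).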
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
vz2 = math_pmin_f32(vz2, vsat_cutoff);
vz3 = math_pmin_f32(vz3, vsat_cutoff);
float vn0 = fmaf(vz0, vminus_log2e, vmagic_bias);
float vn1 = fmaf(vz1, vminus_log2e, vmagic_bias);
float vn2 = fmaf(vz2, vminus_log2e, vmagic_bias);
float vn3 = fmaf(vz3, vminus_log2e, vmagic_bias);
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vidx2 = vb2 & vindex_mask;
const uint32_t vidx3 = vb3 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
const uint32_t vl2 = xnn_table_exp2minus_k_over_8[vidx2];
uint32_t ve2 = vb2 << 20;
const uint32_t vl3 = xnn_table_exp2minus_k_over_8[vidx3];
uint32_t ve3 = vb3 << 20;
ve0 += vl0;
ve1 += vl1;
ve2 += vl2;
ve3 += vl3;
const float vt0 = fmaf(vn0, vln2, vz0);
const float vs0 = uint32_as_float(ve0);
const float vt1 = fmaf(vn1, vln2, vz1);
const float vs1 = uint32_as_float(ve1);
const float vt2 = fmaf(vn2, vln2, vz2);
const float vs2 = uint32_as_float(ve2);
const float vt3 = fmaf(vn3, vln2, vz3);
const float vs3 = uint32_as_float(ve3);
float vp0 = fmaf(vc4, vt0, vc3);
float vp1 = fmaf(vc4, vt1, vc3);
float vp2 = fmaf(vc4, vt2, vc3);
float vp3 = fmaf(vc4, vt3, vc3);
vp0 = fmaf(vp0, vt0, vc2);
vp1 = fmaf(vp1, vt1, vc2);
vp2 = fmaf(vp2, vt2, vc2);
vp3 = fmaf(vp3, vt3, vc2);
vp0 = fmaf(vp0, vt0, vminus_two);
vp1 = fmaf(vp1, vt1, vminus_two);
vp2 = fmaf(vp2, vt2, vminus_two);
vp3 = fmaf(vp3, vt3, vminus_two);
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = fmaf(vp0, vts0, vsmo0);
const float vemo1 = fmaf(vp1, vts1, vsmo1);
const float vemo2 = fmaf(vp2, vts2, vsmo2);
const float vemo3 = fmaf(vp3, vts3, vsmo3);
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc4, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,906
| 29.606218
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-p6h5ts-div-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_p6h5ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
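    // Magic-bias rounding: adding vmagic_bias leaves the rounded value of
    // z * vminus_log2e in the low mantissa bits of vn, so shifting those bits
    // left by 23 builds the float 2**n directly, and subtracting the bias again
    // recovers n as a float for the reduced argument below.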
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc6, vt, vc5);
vp = fmaf(vp, vt, vc4);
vp = fmaf(vp, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
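    // With vemo ~= expm1(-2z) and vepo = vemo + 2, the quotient is tanh of the
    // clamped |x| up to sign; copysignf() then transfers the sign of x back.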
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,314
| 27.231707
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-p6h5ts-div-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_p6h5ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
float vn0 = fmaf(vz0, vminus_log2e, vmagic_bias);
float vn1 = fmaf(vz1, vminus_log2e, vmagic_bias);
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const float vt0 = fmaf(vn0, vln2, vz0);
const float vs0 = uint32_as_float(ve0);
const float vt1 = fmaf(vn1, vln2, vz1);
const float vs1 = uint32_as_float(ve1);
float vp0 = fmaf(vc6, vt0, vc5);
float vp1 = fmaf(vc6, vt1, vc5);
vp0 = fmaf(vp0, vt0, vc4);
vp1 = fmaf(vp1, vt1, vc4);
vp0 = fmaf(vp0, vt0, vc3);
vp1 = fmaf(vp1, vt1, vc3);
vp0 = fmaf(vp0, vt0, vc2);
vp1 = fmaf(vp1, vt1, vc2);
vp0 = fmaf(vp0, vt0, vminus_two);
vp1 = fmaf(vp1, vt1, vminus_two);
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = fmaf(vp0, vts0, vsmo0);
const float vemo1 = fmaf(vp1, vts1, vsmo1);
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc6, vt, vc5);
vp = fmaf(vp, vt, vc4);
vp = fmaf(vp, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
| 3,933
| 27.302158
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma-expm1minus-rr1-p6h5ts-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma_expm1minus_rr1_p6h5ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
vz2 = math_pmin_f32(vz2, vsat_cutoff);
vz3 = math_pmin_f32(vz3, vsat_cutoff);
float vn0 = fmaf(vz0, vminus_log2e, vmagic_bias);
float vn1 = fmaf(vz1, vminus_log2e, vmagic_bias);
float vn2 = fmaf(vz2, vminus_log2e, vmagic_bias);
float vn3 = fmaf(vz3, vminus_log2e, vmagic_bias);
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const uint32_t ve2 = vb2 << 23;
const uint32_t ve3 = vb3 << 23;
const float vt0 = fmaf(vn0, vln2, vz0);
const float vs0 = uint32_as_float(ve0);
const float vt1 = fmaf(vn1, vln2, vz1);
const float vs1 = uint32_as_float(ve1);
const float vt2 = fmaf(vn2, vln2, vz2);
const float vs2 = uint32_as_float(ve2);
const float vt3 = fmaf(vn3, vln2, vz3);
const float vs3 = uint32_as_float(ve3);
float vp0 = fmaf(vc6, vt0, vc5);
float vp1 = fmaf(vc6, vt1, vc5);
float vp2 = fmaf(vc6, vt2, vc5);
float vp3 = fmaf(vc6, vt3, vc5);
vp0 = fmaf(vp0, vt0, vc4);
vp1 = fmaf(vp1, vt1, vc4);
vp2 = fmaf(vp2, vt2, vc4);
vp3 = fmaf(vp3, vt3, vc4);
vp0 = fmaf(vp0, vt0, vc3);
vp1 = fmaf(vp1, vt1, vc3);
vp2 = fmaf(vp2, vt2, vc3);
vp3 = fmaf(vp3, vt3, vc3);
vp0 = fmaf(vp0, vt0, vc2);
vp1 = fmaf(vp1, vt1, vc2);
vp2 = fmaf(vp2, vt2, vc2);
vp3 = fmaf(vp3, vt3, vc2);
vp0 = fmaf(vp0, vt0, vminus_two);
vp1 = fmaf(vp1, vt1, vminus_two);
vp2 = fmaf(vp2, vt2, vminus_two);
vp3 = fmaf(vp3, vt3, vminus_two);
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = fmaf(vp0, vts0, vsmo0);
const float vemo1 = fmaf(vp1, vts1, vsmo1);
const float vemo2 = fmaf(vp2, vts2, vsmo2);
const float vemo3 = fmaf(vp3, vts3, vsmo3);
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = fmaf(vz, vminus_log2e, vmagic_bias);
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = fmaf(vn, vln2, vz);
float vp = fmaf(vc6, vt, vc5);
vp = fmaf(vp, vt, vc4);
vp = fmaf(vp, vt, vc3);
vp = fmaf(vp, vt, vc2);
vp = fmaf(vp, vt, vminus_two);
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = fmaf(vp, vts, vsmo);
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,575
| 29.140541
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
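    // AVX/FMA3 variant of the sign handling: ORing in vsign_mask (presumably
    // 0x80000000) gives vz = -|x|, clamped from below at the negative sat_cutoff;
    // vinvsignx records which lanes were flipped so the final XOR restores
    // tanh's odd symmetry.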
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
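    // These kernels presumably assume only AVX + FMA3 (no AVX2), so the 256-bit
    // integer shift for the exponent reconstruction is emulated: shift each
    // 128-bit half with _mm_slli_epi32 and stitch them back with insertf128.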
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
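    // Build the lane mask from a sliding window over mask_table, which presumably
    // holds seven all-ones words followed by seven zero words; starting `batch`
    // bytes before entry [7] selects exactly the leftover elements for the
    // masked load.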
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
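    // Write out the 1-7 leftover elements with progressively narrower stores
    // (4, 2, then 1 floats), shifting the remaining lanes down in between.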
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,550
| 36.014706
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
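  // Same algorithm as the x16 kernel above, unrolled to three 8-element vectors
  // (24 floats) per iteration, presumably to expose more independent FMA chains.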
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,814
| 37.662281
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,078
| 38.996032
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
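  // Outline of the expm1minus tanh evaluation below (the same steps appear in the
  // unrolled main loop, the 8-element loop, and the masked remainder):
  //   z = -|x|            -- OR-ing in the sign mask forces the sign bit on
  //   vinvsignx = x ^ z   -- bits that later restore the sign of x
  //   z = max(sat_cutoff, z)            -- saturate so tanh flushes to +-1
  //   n = round(z * log2e) via the magic-bias trick; s = 2**n from the exponent bits
  //   t = n * (-ln2) + z                -- reduced argument
  //   p = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t + 2   -- Horner polynomial
  //   emo = p * (t * s) + (s - 1); epo = emo + 2
  //   y = emo / epo; y ^= vinvsignx     -- restores the sign, since tanh is odd
  // (The factor of 2 in tanh(x) = expm1(2z) / (expm1(2z) + 2) is folded into the
  // log2e and minus_ln2 parameters.)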
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
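    // Reconstruct s = 2**n by shifting n's integer bits into the float exponent
    // field. The shift is performed on 128-bit halves because these FMA3 kernels
    // do not assume AVX2, which is required for 256-bit integer shifts.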
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
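    // Build a load/store mask for the 1-7 leftover elements: mask_table holds
    // seven all-ones 32-bit entries followed by zeros, so offsetting the pointer
    // back by `batch` bytes enables exactly the remaining lanes.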
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
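    // Store the remaining 1-7 results 4, 2, and 1 elements at a time.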
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x48.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
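  // Same expm1minus/p6h5 evaluation as the x40 variant above, unrolled to
  // process 48 floats per main-loop iteration.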
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x56.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
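  // Same algorithm as the narrower variants above, unrolled to 56 floats per
  // main-loop iteration.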
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x64.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
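  // Same algorithm, unrolled to 64 floats per main-loop iteration.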
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x72.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
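  // Same algorithm, unrolled to 72 floats per main-loop iteration.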
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
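    // Build a per-element load mask with exactly batch/sizeof(float) active lanes: mask_table is
    // laid out so that this shifted load yields all-ones in the leading valid lanes and zeros in the rest.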
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
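    // Tail store: write the remaining 1-7 floats in chunks of 4, 2, and 1.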
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 16,398
| 43.083333
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
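    // tanh is odd: force the sign bit so that z = -|x|. vinvsignx = x ^ z is zero when x was already
    // negative and the sign bit otherwise; it is XORed back into the result at the end.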
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
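    // Round z * vlog2e to the nearest integer n with the magic-bias trick; the low mantissa bits of vn
    // now hold the biased integer, and vmagic_bias is subtracted back out further down.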
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
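    // Reconstruct s = 2**n by shifting the low bits of the biased vn into the float exponent field.
    // Plain AVX has no 256-bit integer shift, so each 128-bit half is shifted with SSE2 and re-joined.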
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
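    // t := n * (-ln2) + z is the reduced argument that feeds the polynomial approximation.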
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
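    // Degree-6 polynomial evaluated with Horner's scheme: vc6 is the leading coefficient and the
    // final step adds the constant term 2.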
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
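    // Reconstruction: emo = p*(t*s) + (s - 1) approximates exp(2z) - 1, epo = emo + 2 approximates
    // exp(2z) + 1, and tanh(z) = (exp(2z) - 1) / (exp(2z) + 1) = emo / epo.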
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,911
| 34.594203
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
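    // Same computation as the 8-wide loop further down, unrolled 10x with each stage interleaved
    // across the ten vectors.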
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
vz9 = _mm256_max_ps(vsat_cutoff, vz9);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
__m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23)));
const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
__m256 vy9 = _mm256_div_ps(vemo9, vepo9);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 17,662
| 43.603535
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
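    // Unlike the div variants, z is not clamped to the saturation cutoff here: lanes at or below the
    // cutoff are flagged in vm0/vm1 and forced to -1 after the reciprocal-based reconstruction.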
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
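    // Replace the division by epo with RCPPS plus one Newton-Raphson refinement step:
    // r' = r + r * (1 - r * epo), computed with FMA.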
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,536
| 36.774336
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,051
| 38.574803
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,566
| 40.01773
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
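  // Outline: with z := -|x|, tanh(z) = expm1(2z) / (expm1(2z) + 2); the sign of x is restored
  // at the end by flipping the sign bit of the result wherever x and z differ in sign.
  // expm1(2z) is evaluated with a magic-bias range reduction, a Horner polynomial in the
  // reduced argument, and reconstruction against s := 2**n; the division is replaced by
  // _mm256_rcp_ps plus one Newton-Raphson refinement step.
  // The main loop below processes 40 elements (five 8-float AVX vectors) per iteration.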
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
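    // z := -|x| (force the sign bit); vinvsignx records whether the sign was flipped,
    // and vm flags lanes where z falls below the saturation cutoff.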
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
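    // Range reduction: n is obtained with the magic-bias rounding trick, and s := 2**n is
    // rebuilt by shifting the low bits of n into the exponent field per 128-bit half
    // (AVX provides no 256-bit integer shift).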
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
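    // t is the reduced argument; p is evaluated in Horner form from c6 down to c2
    // with a trailing constant of 2.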
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
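    // Reconstruction: emo := p * (t * s) + (s - 1), which approximates expm1(2z).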
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
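    // epo := emo + 2; its reciprocal is estimated with _mm256_rcp_ps and refined with
    // one Newton-Raphson iteration.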
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
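    // tanh(z) = emo / epo; saturated lanes are blended to -1, and the sign of the original x
    // is restored by XORing the recorded sign difference back in.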
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
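    // Load the 1-7 leftover elements through a mask taken from a sliding window over
    // mask_table, offset backwards by the number of remaining bytes so only valid lanes load.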
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
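    // Store the leftover elements in decreasing power-of-two chunks (4, 2, then 1 floats).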
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,081
| 41.2
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
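  // Main loop: processes 48 elements (six 8-float AVX vectors) per iteration.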
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 14,596
| 42.186391
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
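  // Main loop: processes 56 elements (seven 8-float AVX vectors) per iteration.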
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 16,111
| 43.021858
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
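  // Main loop: processes 64 elements (eight 8-float AVX vectors) per iteration.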
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 17,626
| 43.738579
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
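  // Outline of the computation (identical in each unrolled variant below):
  //   vz        = -|x|, forced negative by OR-ing in the sign bit;
  //   vinvsignx = x ^ vz records which sign bit was flipped so it can be undone;
  //   vn        = round(vz * vlog2e) via the floating-point magic-bias trick;
  //   vs        = 2**vn, rebuilt by shifting vn's low bits into the exponent field;
  //   vt        = vz + vn * vminus_ln2 is the reduced argument;
  //   vp        = degree-6 polynomial in vt (Horner form, last coefficient vtwo);
  //   vemo      = vp * vt * vs + (vs - 1) ~ expm1(-2|x|), and vepo = vemo + 2;
  //   vy        = vemo / vepo = -tanh(|x|), with the division done as rcp plus
  //               one Newton-Raphson step;
  //   lanes with vz <= vsat_cutoff are saturated to -1, and the sign of x is
  //   restored with a final XOR against vinvsignx.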
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
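    // Rebuild 2**vn for each vector: shifting left by 23 moves the integer bits
    // of vn (still carrying the magic bias) into the exponent field. The shift
    // is done on 128-bit halves because the 256-bit integer shift
    // (_mm256_slli_epi32) needs AVX2, which this AVX/FMA3 target does not assume.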
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 19,141 | 44.36019 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
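    // Approximate 1/vepo with the low-precision rcp instruction, then refine it
    // with one Newton-Raphson step: vrepo <- vrepo + vrepo * (1 - vrepo * vepo).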
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 5,393 | 34.96 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
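  // Main loop: ten independent 8-element vectors are processed per iteration;
  // interleaving their FMA chains gives the out-of-order core more independent
  // work to overlap. Leftover elements fall through to the 8-wide loop and the
  // masked tail below.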
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
const __m256 vm9 = _mm256_cmp_ps(vz9, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
__m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23)));
const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vrepo9 = _mm256_rcp_ps(vepo9);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
const __m256 verepo9 = _mm256_fnmsub_ps(vrepo9, vepo9, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm256_fmadd_ps(verepo9, vrepo9, vrepo9);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
__m256 vy9 = _mm256_mul_ps(vemo9, vrepo9);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
vy9 = _mm256_blendv_ps(vy9, vminus_one, vm9);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 20,656 | 44.904444 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
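  // "nr1adj" variant: instead of blending saturated lanes to -1 at the end,
  // vz is clamped with max(vsat_cutoff, vz) up front; and after the rcp-based
  // division an extra adjustment step folds the residual back in
  // (vey = vemo - vy * vepo; vy += vey * vrepo) to trim the remaining
  // rounding error of the reciprocal approximation.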
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,633 | 36.53913 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,174 | 38.285714 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
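  // This kernel evaluates tanh(x) via the identity tanh(z) = expm1(2z) / (expm1(2z) + 2)
  // for z = -|x| (hence "expm1minus"); z is formed by OR-ing the sign bit into x. Each
  // iteration clamps z at sat_cutoff (past which the result saturates to +-1), splits the
  // exponential argument into a power-of-two scale vs (rebuilt by shifting the
  // magic-bias-rounded vn into the exponent field) and a reduced argument vt, evaluates a
  // degree-6 polynomial for the expm1 term, and divides by (expm1 + 2) using
  // _mm256_rcp_ps refined by one Newton-Raphson step plus a residual adjustment (the
  // "nr1adj" suffix). The final XOR with vinvsignx restores the sign of x.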
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
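    // The tail of 1-7 elements is handled with a masked load: offsetting the load back by
    // `batch` bytes from &mask_table[7] is assumed to line up the table's all-ones entries
    // with exactly the first batch / sizeof(float) lanes of the mask.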
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
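    // Write the 1-7 results with a 4/2/1-element cascade, shifting the remaining lanes
    // into the low positions of vy_lo before each narrower store.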
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,715 | 39.680556 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,256 | 40.820189 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 14,797 | 41.768786 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 16,338 | 42.570667 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
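  // Remainder of 1-7 elements: masked load, the same computation, then piecewise stores.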
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 17,879
| 43.257426
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
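  // Same tanh pipeline as the x64 variant above, unrolled to 9 AVX vectors
  // (72 floats) per main-loop iteration.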
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
const __m256 vey8 = _mm256_fnmadd_ps(vy8, vepo8, vemo8);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm256_fmadd_ps(vey8, vrepo8, vy8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 19,420
| 43.852194
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
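  // x8 variant: no multi-vector unrolling; one 8-float vector per iteration.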
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 5,438
| 34.782895
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-fma3-expm1minus-rr1-p6h5ts-nr1adj-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__fma3_expm1minus_rr1_p6h5ts_nr1adj_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
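  // Unrolled to 10 AVX vectors (80 floats) per main-loop iteration.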
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
vz9 = _mm256_max_ps(vsat_cutoff, vz9);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1);
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1);
const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1);
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1);
const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1);
const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1);
const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1);
const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1);
const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1);
const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1);
__m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23)));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23));
__m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23)));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23));
__m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23)));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23));
__m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23)));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23));
__m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23)));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23));
__m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23)));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23));
__m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23)));
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23));
__m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23)));
const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23));
__m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23)));
const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23));
__m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23)));
const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23));
vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1);
vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1);
vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1);
vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1);
vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1);
vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1);
vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1);
vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1);
vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1);
vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vrepo9 = _mm256_rcp_ps(vepo9);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
const __m256 verepo9 = _mm256_fnmsub_ps(vrepo9, vepo9, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm256_fmadd_ps(verepo9, vrepo9, vrepo9);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
__m256 vy9 = _mm256_mul_ps(vemo9, vrepo9);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
const __m256 vey8 = _mm256_fnmadd_ps(vy8, vepo8, vemo8);
const __m256 vey9 = _mm256_fnmadd_ps(vy9, vepo9, vemo9);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm256_fmadd_ps(vey8, vrepo8, vy8);
vy9 = _mm256_fmadd_ps(vey9, vrepo9, vy9);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m128 vn_hi = _mm256_extractf128_ps(vn, 1);
__m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23)));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23));
vs = _mm256_insertf128_ps(vs, vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 20,961 | 44.372294 | 132 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neon-expm1minus-rr1-p6h5ts-nr2recps-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neon_expm1minus_rr1_p6h5ts_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
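  // Per 4-element block: clamp |x| at the saturation cutoff, approximate expm1 with a
  // degree-6 polynomial after radix-2 range reduction (rr1-p6h5), form tanh as emo / (emo + 2)
  // with the reciprocal refined by two VRECPS Newton-Raphson steps (nr2recps), and copy the
  // sign bit of x back into the result with VBSL.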
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vmlsq_f32(vtwo, vp0123, vt0123);
vp4567 = vmlsq_f32(vtwo, vp4567, vt4567);
vp89AB = vmlsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vmlsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vmlsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vmlsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
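    // Two VRECPE/VRECPS Newton-Raphson iterations refine the reciprocal of epo = emo + 2.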
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
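    // Tail of 1-3 elements: a full vector is loaded (the kernel is declared XNN_OOB_READS)
    // and only the valid lanes are stored below.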
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 8,293 | 36.529412 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neon-expm1minus-rr1-p6h5ts-nr2recps-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neon_expm1minus_rr1_p6h5ts_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vmlsq_f32(vtwo, vp0123, vt0123);
vp4567 = vmlsq_f32(vtwo, vp4567, vt4567);
vp89AB = vmlsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vmlsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vmlsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vmlsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vmlsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vmlsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,626 | 38.293878 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neon-expm1minus-rr1-p6h5ts-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neon_expm1minus_rr1_p6h5ts_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 4,208 | 30.177778 | 96 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neon-expm1minus-rr1-p6h5ts-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neon_expm1minus_rr1_p6h5ts_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp0123 = vmlsq_f32(vtwo, vp0123, vt0123);
vp4567 = vmlsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vmlsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vmlsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vmlaq_f32(vz, vn, vln2);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vmlsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 6,957 | 34.319797 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr1recps1fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr1recps1fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
  const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.ln2);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
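  // expm1 is evaluated from an 8-entry exp2 lookup table plus a degree-4 polynomial (lut8-p4h3);
  // tanh is formed as emo / (emo + 2), with the reciprocal refined by one VRECPS step followed by
  // one FMA-based Newton-Raphson step (nr1recps1fma); the sign of x is restored with VBSL.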
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
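    // The low 3 bits of each lane of n index the 2**(-k/8) table (two scalar lookups per
    // 64-bit lane); the fetched bit pattern is combined with n's bits shifted left by 20
    // to reconstruct the exponential scale s.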
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint32x4_t ve89AB = vshlq_n_u32(vreinterpretq_u32_f32(vn89AB), 20);
const uint64x2_t vidx89AB = vandq_u64(vreinterpretq_u64_f32(vn89AB), vindex_mask);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl89 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
uint32x2_t vlAB = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
vl89 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const uint32x4_t vl89AB = vcombine_u32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_u32(vaddq_u32(vl89AB, ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc3, vc4, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
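    // Reciprocal refinement: one VRECPS-based Newton-Raphson step, then one explicit FMA step.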
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 11,323 | 43.234375 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr1recps1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr1recps1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
  const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.ln2);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint32x4_t ve89AB = vshlq_n_u32(vreinterpretq_u32_f32(vn89AB), 20);
const uint64x2_t vidx89AB = vandq_u64(vreinterpretq_u64_f32(vn89AB), vindex_mask);
const uint32x4_t veCDEF = vshlq_n_u32(vreinterpretq_u32_f32(vnCDEF), 20);
const uint64x2_t vidxCDEF = vandq_u64(vreinterpretq_u64_f32(vnCDEF), vindex_mask);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl89 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
uint32x2_t vlCD = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
uint32x2_t vlAB = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
uint32x2_t vlEF = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
vl89 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], vlAB, 1);
vlCD = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const uint32x4_t vl89AB = vcombine_u32(vl89, vlAB);
const uint32x4_t vlCDEF = vcombine_u32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_u32(vaddq_u32(vl89AB, ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_u32(vaddq_u32(vlCDEF, veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc3, vc4, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc3, vc4, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vfmsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 13,253 | 45.181185 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr1recps1fma-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr1recps1fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
  const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.ln2);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 5,447 | 35.563758 | 101 | c |

| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr1recps1fma-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr1recps1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.ln2);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
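  // Main loop: 8 elements (two float32x4_t vectors) per iteration.
  // tanh(x) is evaluated on z = |x| (clamped at sat_cutoff) as emo / (emo + 2),
  // where emo is an expm1-style value reconstructed from the 8-entry exp2 table,
  // the exponent bits of n, and a small polynomial in the reduced argument t;
  // the sign of x is merged back into the result with a bitwise select.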
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
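    // Reciprocal of (emo + 2): VRECPE estimate refined by one VRECPS step
    // followed by one FMA-based Newton-Raphson step (the "nr1recps1fma" scheme).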
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
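  // Tail of 1-3 elements: reuse the vector path and store only the low lanes.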
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,390 | 40.737778 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr2fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr2fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.ln2);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
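  // Main loop: 12 elements (three float32x4_t vectors) per iteration, using an
  // 8-entry exp2 table plus a degree-4 polynomial for the expm1 reconstruction,
  // and two FMA-based Newton-Raphson steps ("nr2fma") for the reciprocal.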
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint32x4_t ve89AB = vshlq_n_u32(vreinterpretq_u32_f32(vn89AB), 20);
const uint64x2_t vidx89AB = vandq_u64(vreinterpretq_u64_f32(vn89AB), vindex_mask);
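    // Extract the two 3-bit table indices held in each 64-bit lane and gather
    // the corresponding entries of xnn_table_exp2minus_k_over_8.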
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl89 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
uint32x2_t vlAB = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
vl89 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const uint32x4_t vl89AB = vcombine_u32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_u32(vaddq_u32(vl89AB, ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc3, vc4, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
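    // Newton-Raphson: start from a VRECPE estimate of 1/(emo + 2) and apply two
    // FMA-based iterations r += r * (1 - r * epo).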
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
float32x4_t verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 11,384 | 43.472656 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr2fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.ln2);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint32x4_t ve89AB = vshlq_n_u32(vreinterpretq_u32_f32(vn89AB), 20);
const uint64x2_t vidx89AB = vandq_u64(vreinterpretq_u64_f32(vn89AB), vindex_mask);
const uint32x4_t veCDEF = vshlq_n_u32(vreinterpretq_u32_f32(vnCDEF), 20);
const uint64x2_t vidxCDEF = vandq_u64(vreinterpretq_u64_f32(vnCDEF), vindex_mask);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl89 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
uint32x2_t vlCD = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
uint32x2_t vlAB = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
uint32x2_t vlEF = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
vl89 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], vlAB, 1);
vlCD = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const uint32x4_t vl89AB = vcombine_u32(vl89, vlAB);
const uint32x4_t vlCDEF = vcombine_u32(vlCD, vlEF);
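    // Combine each table entry with the matching exponent bits (n << 20) and
    // reinterpret the sum as the float scale s.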
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_u32(vaddq_u32(vl89AB, ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_u32(vaddq_u32(vlCDEF, veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc3, vc4, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc3, vc4, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vfmsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
float32x4_t verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 13,329 | 45.445993 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr2fma-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr2fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.ln2);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
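  // Single-vector variant: 4 elements per iteration; the tail below handles
  // 1-3 leftover elements with partial stores.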
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 5,463 | 35.671141 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-lut8-p4h3ts-nr2fma-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_lut8_p4h3ts_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.magic_bias);
const uint64x2_t vindex_mask = vreinterpretq_u64_u32(vmovq_n_u32(UINT32_C(0x7)));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.ln2);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_lut8_p4h3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const uint32x4_t ve0123 = vshlq_n_u32(vreinterpretq_u32_f32(vn0123), 20);
const uint64x2_t vidx0123 = vandq_u64(vreinterpretq_u64_f32(vn0123), vindex_mask);
const uint32x4_t ve4567 = vshlq_n_u32(vreinterpretq_u32_f32(vn4567), 20);
const uint64x2_t vidx4567 = vandq_u64(vreinterpretq_u64_f32(vn4567), vindex_mask);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
uint32x2_t vl01 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
uint32x2_t vl45 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
uint32x2_t vl23 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
uint32x2_t vl67 = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
vl01 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], vl23, 1);
vl45 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], vl67, 1);
const uint32x4_t vl0123 = vcombine_u32(vl01, vl23);
const uint32x4_t vl4567 = vcombine_u32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_u32(vaddq_u32(vl0123, ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_u32(vaddq_u32(vl4567, ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc3, vc4, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc3, vc4, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const uint32x4_t ve = vshlq_n_u32(vreinterpretq_u32_f32(vn), 20);
const uint64x2_t vidx = vandq_u64(vreinterpretq_u64_f32(vn), vindex_mask);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
uint32x2_t vl_lo = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
uint32x2_t vl_hi = vld1_dup_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_u32(&xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const uint32x4_t vl = vcombine_u32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_u32(vaddq_u32(vl, ve));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc3, vc4, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,436 | 40.942222 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr1recps1fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr1recps1fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
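  // p6h5 variant: no lookup table; 2^n is formed directly by shifting n into
  // the float exponent field (<< 23), and expm1 is approximated with the p6h5
  // polynomial (coefficients c6..c2).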
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
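    // Evaluate the polynomial in t for each group via Horner's scheme.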
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 8,367 | 36.864253 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr1recps1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr1recps1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
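    // Form the scale 2^n for each group by shifting n's low bits into the float
    // exponent field; the p6h5 variant needs no table lookup.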
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vfmsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
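  // Remainder of 1-3 elements: a full 4-element vector is loaded (the kernel is declared
  // XNN_OOB_READS, so reading past the last element is tolerated) and the result is written
  // back in 2- and 1-element pieces.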
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,715
| 38.657143
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr1recps1fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr1recps1fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
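  // One NEON vector (4 elements) per iteration: z = min(|x|, sat_cutoff), expm1 of the reduced
  // argument is reconstructed from the degree-6 polynomial and the 2^n scale, and tanh is formed
  // as expm1 / (expm1 + 2) with the reciprocal refined by one VRECPS and one FMA Newton-Raphson
  // step before the sign of x is re-applied.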
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 4,237
| 30.392593
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr1recps1fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr1recps1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
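  // expm1-based tanh evaluation unrolled to 8 elements (2 NEON vectors) per main-loop iteration;
  // the reciprocal of (expm1 + 2) is refined with one VRECPS and one FMA Newton-Raphson step,
  // and leftover full vectors fall through to the 4-element loop below.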
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 7,016
| 34.619289
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2fma-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
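  // 12 elements (3 NEON vectors) per main-loop iteration. This variant differs from the
  // nr1recps1fma kernels only in the reciprocal refinement: the VRECPE estimate of
  // 1/(expm1 + 2) is improved with two FMA-based Newton-Raphson steps ("nr2fma").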
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
float32x4_t verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 8,428
| 37.140271
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
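  // 16 elements (4 NEON vectors) per main-loop iteration, using two FMA-based Newton-Raphson
  // steps to refine the VRECPE reciprocal estimate ("nr2fma").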
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vfmsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
float32x4_t verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
verepo89AB = vfmsq_f32(vone, vrepo89AB, vepo89AB);
verepoCDEF = vfmsq_f32(vone, vrepoCDEF, vepoCDEF);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
vrepo89AB = vfmaq_f32(vrepo89AB, vrepo89AB, verepo89AB);
vrepoCDEF = vfmaq_f32(vrepoCDEF, vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
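  // Handle remaining groups of 4 elements with the same computation, one vector at a time.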
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,791
| 38.967347
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
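  // Single-vector kernel (4 elements per iteration) of the nr2fma variant: the reciprocal of
  // (expm1 + 2) is refined with two FMA Newton-Raphson iterations.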
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 4,253
| 30.511111
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
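  // 8 elements (2 NEON vectors) per main-loop iteration; reciprocal refinement is two
  // FMA Newton-Raphson steps, as in the other nr2fma kernels.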
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
float32x4_t verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
verepo0123 = vfmsq_f32(vone, vrepo0123, vepo0123);
verepo4567 = vfmsq_f32(vone, vrepo4567, vepo4567);
vrepo0123 = vfmaq_f32(vrepo0123, vrepo0123, verepo0123);
vrepo4567 = vfmaq_f32(vrepo4567, vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 7,062
| 34.852792
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2recps-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
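  // 12 elements (3 NEON vectors) per main-loop iteration. In the "nr2recps" variant both
  // Newton-Raphson refinements of the VRECPE estimate use the VRECPS reciprocal-step
  // instruction (r = r * (2 - r * d)) instead of an FMA-computed residual.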
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 8,296
| 36.542986
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2recps-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.sat_cutoff);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.minus_log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.magic_bias);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
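  // 16 elements (4 NEON vectors) per main-loop iteration; the reciprocal of (expm1 + 2)
  // is refined with two VRECPS Newton-Raphson steps ("nr2recps").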
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vzCDEF = vabsq_f32(vxCDEF);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
vz89AB = vminq_f32(vz89AB, vsat_cutoff);
vzCDEF = vminq_f32(vzCDEF, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
vp89AB = vfmsq_f32(vtwo, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtwo, vpCDEF, vtCDEF);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vts89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t vsmo89AB = vsubq_f32(vs89AB, vone);
const float32x4_t vtsCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t vsmoCDEF = vsubq_f32(vsCDEF, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vemo89AB = vfmsq_f32(vsmo89AB, vp89AB, vts89AB);
const float32x4_t vemoCDEF = vfmsq_f32(vsmoCDEF, vpCDEF, vtsCDEF);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
const float32x4_t vepo89AB = vaddq_f32(vemo89AB, vtwo);
const float32x4_t vepoCDEF = vaddq_f32(vemoCDEF, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t vrepo89AB = vrecpeq_f32(vepo89AB);
float32x4_t vrepoCDEF = vrecpeq_f32(vepoCDEF);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
float32x4_t verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
float32x4_t verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
verepo89AB = vrecpsq_f32(vrepo89AB, vepo89AB);
verepoCDEF = vrecpsq_f32(vrepoCDEF, vepoCDEF);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
vrepo89AB = vmulq_f32(vrepo89AB, verepo89AB);
vrepoCDEF = vmulq_f32(vrepoCDEF, verepoCDEF);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
float32x4_t vy89AB = vmulq_f32(vemo89AB, vrepo89AB);
float32x4_t vyCDEF = vmulq_f32(vemoCDEF, vrepoCDEF);
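    // vy is approximately -tanh(|x|); selecting the sign bit from x yields tanh(x).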
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vy89AB = vbslq_f32(vsign_mask, vx89AB, vy89AB);
vyCDEF = vbslq_f32(vsign_mask, vxCDEF, vyCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 9,629 | 38.306122 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
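  // Reconstruct tanh(x) from expm1: for z = min(|x|, sat_cutoff), compute
  // emo = expm1(-2z) via range reduction (n = round(z * minus_log2e), s = 2^n,
  // t = z + n * ln2) and a degree-6 polynomial in t, then form emo / (emo + 2),
  // which equals -tanh(z). The final bit-select copies the sign of x back in,
  // and the division is replaced by a reciprocal estimate refined with two
  // Newton-Raphson (VRECPS) steps, hence the "nr2recps" suffix.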
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 4,211 | 30.2 | 96 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-neonfma-expm1minus-rr1-p6h5ts-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
#include <xnnpack/vunary.h>
#include <xnnpack/microparams.h>
void xnn_f32_vtanh_ukernel__neonfma_expm1minus_rr1_p6h5ts_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.sat_cutoff);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.minus_log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.magic_bias);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.ln2);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_expm1minus_rr1_p6h5.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vz0123 = vabsq_f32(vx0123);
float32x4_t vz4567 = vabsq_f32(vx4567);
vz0123 = vminq_f32(vz0123, vsat_cutoff);
vz4567 = vminq_f32(vz4567, vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
const float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
const float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmsq_f32(vtwo, vp0123, vt0123);
vp4567 = vfmsq_f32(vtwo, vp4567, vt4567);
const float32x4_t vts0123 = vmulq_f32(vt0123, vs0123);
const float32x4_t vsmo0123 = vsubq_f32(vs0123, vone);
const float32x4_t vts4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t vsmo4567 = vsubq_f32(vs4567, vone);
const float32x4_t vemo0123 = vfmsq_f32(vsmo0123, vp0123, vts0123);
const float32x4_t vemo4567 = vfmsq_f32(vsmo4567, vp4567, vts4567);
const float32x4_t vepo0123 = vaddq_f32(vemo0123, vtwo);
const float32x4_t vepo4567 = vaddq_f32(vemo4567, vtwo);
float32x4_t vrepo0123 = vrecpeq_f32(vepo0123);
float32x4_t vrepo4567 = vrecpeq_f32(vepo4567);
float32x4_t verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
float32x4_t verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
verepo0123 = vrecpsq_f32(vrepo0123, vepo0123);
verepo4567 = vrecpsq_f32(vrepo4567, vepo4567);
vrepo0123 = vmulq_f32(vrepo0123, verepo0123);
vrepo4567 = vmulq_f32(vrepo4567, verepo4567);
float32x4_t vy0123 = vmulq_f32(vemo0123, vrepo0123);
float32x4_t vy4567 = vmulq_f32(vemo4567, vrepo4567);
vy0123 = vbslq_f32(vsign_mask, vx0123, vy0123);
vy4567 = vbslq_f32(vsign_mask, vx4567, vy4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vz = vabsq_f32(vx);
vz = vminq_f32(vz, vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
float32x4_t vrepo = vrecpeq_f32(vepo);
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
float32x4_t vy = vmulq_f32(vemo, vrepo);
vy = vbslq_f32(vsign_mask, vx, vy);
float32x2_t vy_low = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_low); output += 2;
vy_low = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_low, 0);
}
}
}
| 6,960 | 34.335025 | 101 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-lut8-p4h3ts-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_lut8_p4h3ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
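  // LUT variant: the scale 2^n is split between a 3-bit index into the 8-entry
  // exp2(k/8) table and the remaining bits shifted into the exponent (vb << 20);
  // a degree-4 polynomial (c4..c2) covers the residual, and copysignf() restores
  // the sign of x at the end.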
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,396 | 27.535714 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-lut8-p4h3ts-div-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_lut8_p4h3ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
ve0 += vl0;
ve1 += vl1;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
float vp0 = vc4 * vt0 + vc3;
float vp1 = vc4 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
| 4,081 | 27.151724 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-lut8-p4h3ts-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_lut8_p4h3ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_lut8_p4h3.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_lut8_p4h3.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_lut8_p4h3.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x7);
const float vln2 = params->scalar_expm1minus_rr1_lut8_p4h3.ln2;
const float vc4 = params->scalar_expm1minus_rr1_lut8_p4h3.c4;
const float vc3 = params->scalar_expm1minus_rr1_lut8_p4h3.c3;
const float vc2 = params->scalar_expm1minus_rr1_lut8_p4h3.c2;
const float vminus_two = params->scalar_expm1minus_rr1_lut8_p4h3.minus_two;
const float vone = params->scalar_expm1minus_rr1_lut8_p4h3.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
vz2 = math_pmin_f32(vz2, vsat_cutoff);
vz3 = math_pmin_f32(vz3, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t vidx0 = vb0 & vindex_mask;
const uint32_t vidx1 = vb1 & vindex_mask;
const uint32_t vidx2 = vb2 & vindex_mask;
const uint32_t vidx3 = vb3 & vindex_mask;
const uint32_t vl0 = xnn_table_exp2minus_k_over_8[vidx0];
uint32_t ve0 = vb0 << 20;
const uint32_t vl1 = xnn_table_exp2minus_k_over_8[vidx1];
uint32_t ve1 = vb1 << 20;
const uint32_t vl2 = xnn_table_exp2minus_k_over_8[vidx2];
uint32_t ve2 = vb2 << 20;
const uint32_t vl3 = xnn_table_exp2minus_k_over_8[vidx3];
uint32_t ve3 = vb3 << 20;
ve0 += vl0;
ve1 += vl1;
ve2 += vl2;
ve3 += vl3;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
const float vt2 = vn2 * vln2 + vz2;
const float vs2 = uint32_as_float(ve2);
const float vt3 = vn3 * vln2 + vz3;
const float vs3 = uint32_as_float(ve3);
float vp0 = vc4 * vt0 + vc3;
float vp1 = vc4 * vt1 + vc3;
float vp2 = vc4 * vt2 + vc3;
float vp3 = vc4 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
vp2 = vp2 * vt2 + vminus_two;
vp3 = vp3 * vt3 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vemo2 = vp2 * vts2 + vsmo2;
const float vemo3 = vp3 * vts3 + vsmo3;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
uint32_t ve = vb << 20;
ve += vl;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,789 | 29 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-p6h5ts-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_p6h5ts_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
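  // Non-LUT variant: the magic-biased n is shifted straight into the exponent
  // field (vb << 23) to form 2^n, a degree-6 polynomial (c6..c2) covers the
  // residual, and copysignf() restores the sign of x at the end.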
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,285 | 26.878049 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-p6h5ts-div-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_p6h5ts_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output = vy;
}
}
| 3,840 | 26.633094 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-scalar-expm1minus-rr1-p6h5ts-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__scalar_expm1minus_rr1_p6h5ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsat_cutoff = params->scalar_expm1minus_rr1_p6h5.sat_cutoff;
const float vminus_log2e = params->scalar_expm1minus_rr1_p6h5.minus_log2e;
const float vmagic_bias = params->scalar_expm1minus_rr1_p6h5.magic_bias;
const float vln2 = params->scalar_expm1minus_rr1_p6h5.ln2;
const float vc6 = params->scalar_expm1minus_rr1_p6h5.c6;
const float vc5 = params->scalar_expm1minus_rr1_p6h5.c5;
const float vc4 = params->scalar_expm1minus_rr1_p6h5.c4;
const float vc3 = params->scalar_expm1minus_rr1_p6h5.c3;
const float vc2 = params->scalar_expm1minus_rr1_p6h5.c2;
const float vminus_two = params->scalar_expm1minus_rr1_p6h5.minus_two;
const float vone = params->scalar_expm1minus_rr1_p6h5.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vz0 = fabsf(vx0);
float vz1 = fabsf(vx1);
float vz2 = fabsf(vx2);
float vz3 = fabsf(vx3);
vz0 = math_pmin_f32(vz0, vsat_cutoff);
vz1 = math_pmin_f32(vz1, vsat_cutoff);
vz2 = math_pmin_f32(vz2, vsat_cutoff);
vz3 = math_pmin_f32(vz3, vsat_cutoff);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t vb0 = float_as_uint32(vn0);
vn0 -= vmagic_bias;
const uint32_t vb1 = float_as_uint32(vn1);
vn1 -= vmagic_bias;
const uint32_t vb2 = float_as_uint32(vn2);
vn2 -= vmagic_bias;
const uint32_t vb3 = float_as_uint32(vn3);
vn3 -= vmagic_bias;
const uint32_t ve0 = vb0 << 23;
const uint32_t ve1 = vb1 << 23;
const uint32_t ve2 = vb2 << 23;
const uint32_t ve3 = vb3 << 23;
const float vt0 = vn0 * vln2 + vz0;
const float vs0 = uint32_as_float(ve0);
const float vt1 = vn1 * vln2 + vz1;
const float vs1 = uint32_as_float(ve1);
const float vt2 = vn2 * vln2 + vz2;
const float vs2 = uint32_as_float(ve2);
const float vt3 = vn3 * vln2 + vz3;
const float vs3 = uint32_as_float(ve3);
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 = vp0 * vt0 + vminus_two;
vp1 = vp1 * vt1 + vminus_two;
vp2 = vp2 * vt2 + vminus_two;
vp3 = vp3 * vt3 + vminus_two;
const float vts0 = vt0 * vs0;
const float vsmo0 = vs0 - vone;
const float vts1 = vt1 * vs1;
const float vsmo1 = vs1 - vone;
const float vts2 = vt2 * vs2;
const float vsmo2 = vs2 - vone;
const float vts3 = vt3 * vs3;
const float vsmo3 = vs3 - vone;
const float vemo0 = vp0 * vts0 + vsmo0;
const float vemo1 = vp1 * vts1 + vsmo1;
const float vemo2 = vp2 * vts2 + vsmo2;
const float vemo3 = vp3 * vts3 + vsmo3;
const float vepo0 = vemo0 - vminus_two;
const float vepo1 = vemo1 - vminus_two;
const float vepo2 = vemo2 - vminus_two;
const float vepo3 = vemo3 - vminus_two;
float vy0 = vemo0 / vepo0;
float vy1 = vemo1 / vepo1;
float vy2 = vemo2 / vepo2;
float vy3 = vemo3 / vepo3;
vy0 = copysignf(vy0, vx0);
vy1 = copysignf(vy1, vx1);
vy2 = copysignf(vy2, vx2);
vy3 = copysignf(vy3, vx3);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vz = fabsf(vx);
vz = math_pmin_f32(vz, vsat_cutoff);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t vb = float_as_uint32(vn);
vn -= vmagic_bias;
const uint32_t ve = vb << 23;
const float vs = uint32_as_float(ve);
const float vt = vn * vln2 + vz;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp = vp * vt + vminus_two;
const float vts = vt * vs;
const float vsmo = vs - vone;
const float vemo = vp * vts + vsmo;
const float vepo = vemo - vminus_two;
float vy = vemo / vepo;
vy = copysignf(vy, vx);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,418 | 28.291892 | 87 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-lut8-p4h3ts-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_lut8_p4h3ts_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
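  // SSE2 variant: z = -|x| is formed by ORing in the sign bit, and vinvsignx
  // records whether that flipped the sign so it can be XORed back into the
  // result; the saturation clamp uses max() because z is negative here.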
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
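    // SSE2 has no vector gather, so the exp2(k/8) table entries are fetched with
    // scalar loads; the x86-64 path pulls two indices at a time out of each
    // 64-bit lane, while the 32-bit path extracts them one by one.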
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)]);
const __m128i vl8 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const __m128i vl9 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)]);
const __m128i vlA = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
const __m128i vlB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
const __m128i vl8 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx5]);
const __m128i vl9 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx9]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx6]);
const __m128i vlA = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxA]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx7]);
const __m128i vlB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxB]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
#endif
const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 16,225 | 46.306122 | 111 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-lut8-p4h3ts-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_lut8_p4h3ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
const __m128i veCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 20);
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
__m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
vidxCDEF = _mm_unpackhi_epi64(vidxCDEF, vidxCDEF);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)]);
const __m128i vl8 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
const __m128i vl9 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)]);
const __m128i vlC = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
const __m128i vlD = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)]);
const __m128i vlA = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB]);
const __m128i vlB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)]);
const __m128i vlE = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF]);
const __m128i vlF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const __m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
const __m128i vl8 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
const __m128i vlC = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxC]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx5]);
const __m128i vl9 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx9]);
const __m128i vlD = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxD]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx6]);
const __m128i vlA = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxA]);
const __m128i vlE = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxE]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx7]);
const __m128i vlB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxB]);
const __m128i vlF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxF]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
#endif
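    // SSE2 has no vector gather: the 3-bit table indices are moved to general-purpose registers
    // (via _mm_cvtsi128_si64 on x86-64, _mm_extract_epi16 elsewhere), the table entries are loaded
    // one at a time with _mm_cvtsi32_si128, and _mm_unpacklo_epi32/_mm_unpacklo_epi64 reassemble
    // them into vectors.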
const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, veCDEF));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc4, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
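    // vemo ~= exp(2*z) - 1 and vepo = vemo + 2 ~= exp(2*z) + 1 (subtracting minus_two adds 2), so the
    // quotient below equals -tanh(|x|); the XOR with vinvsignx afterwards restores the sign of the input.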
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
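// For reference, a scalar model of what each lane of this kernel (and of the other expm1minus tanh
// kernels below) computes. Illustrative sketch only: it calls libm instead of the table + polynomial
// approximation, omits the sat_cutoff clamp, and the helper name tanh_ref is hypothetical, not part
// of XNNPACK.
//
//   #include <math.h>
//
//   static float tanh_ref(float x) {
//     const float z = -fabsf(x);             // vz = x | sign_mask
//     const float emo = expm1f(2.0f * z);    // vemo: approximated above via LUT + polynomial
//     const float y = emo / (emo + 2.0f);    // vemo / vepo == -tanh(|x|)
//     return signbit(x) ? y : -y;            // XOR with vinvsignx restores the sign
//   }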
| 19,062 | 48.643229 | 111 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-lut8-p4h3ts-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
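// The (k << 20) decrement pairs with the `ve = n << 20` shift inside the kernel: shifting the
// magic-biased float n left by 20 bits moves its integer part into the exponent field of an IEEE
// float, but also drags the 3 low index bits along into bits 20..22; pre-subtracting (k << 20)
// from the table entries cancels that contribution when `vl + ve` is formed into the scale vs.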
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_lut8_p4h3ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
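  // Remainder handling: the kernel is annotated XNN_OOB_READS, so the tail below loads a full
  // 16-byte vector even when only 1-3 elements remain and simply stores just the valid lanes
  // (low pair via _mm_storel_pi, single element via _mm_store_ss).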
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,542 | 40.674033 | 111 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-lut8-p4h3ts-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_lut8_p4h3ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
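    // The table gather is specialized per architecture: on x86-64 each pair of 32-bit indices is
    // moved to a GPR with a single _mm_cvtsi128_si64, while the 32-bit build extracts the indices
    // one by one with _mm_extract_epi16 (the indices are < 8, so the high halfword is zero).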
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const __m128i vl4 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl5 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx5]);
const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const __m128i vl6 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx6]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl7 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx7]);
const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
#endif
const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 13,385 | 43.324503 | 111 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
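  // Unlike the lut8 kernels above, this p6h5 variant uses no lookup table: the magic-biased n is
  // shifted left by the full 23 bits straight into the exponent field to build the scale vs, and
  // the longer degree-6 polynomial in t compensates for the coarser argument reduction.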
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
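    // After the Horner steps p(t) = 2 + c2*t + ... + c6*t^5 (the last step subtracts minus_two,
    // i.e. adds 2); expm1 is then reassembled below as (s - 1) + (t*s)*p(t).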
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,742 | 36.770732 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
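    // z is clamped at sat_cutoff: beyond that point tanh(|x|) equals 1 to within float precision,
    // so the clamp makes large (and infinite) inputs saturate to +-1 instead of feeding an
    // out-of-range argument into the exponent reconstruction.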
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 8,936 | 38.72 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,051 | 30.905512 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
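  // Same computation as the x4/x12/x16 p6h5ts_div kernels in this directory; the xN suffix only
  // selects how many elements each main-loop iteration handles, so the framework can pick an
  // unroll factor per target.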
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 6,545
| 34.383784
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr1-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr1_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
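    // z := -|x|; vinvsignx records which inputs were positive so the sign can be
    // restored after the computation, and vm flags lanes with z <= sat_cutoff that
    // are forced to -1 (saturated tanh) before the sign is re-applied.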
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
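    // Horner evaluation of the expm1 polynomial in the reduced argument t; the trailing
    // subtraction of vminus_two supplies its constant +2 coefficient, and the result is
    // multiplied by t*s below.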
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
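    // vepo = -(vemo + 2). Start from the RCPPS estimate of 1/vepo and apply one
    // Newton-Raphson step, r <- r * (r*vepo - 2), which refines the reciprocal and
    // flips its sign, so vemo * r approximates vemo / (vemo + 2) = tanh(z).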
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
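    // Replace saturated lanes with -1, then XOR the saved sign bit back in so that
    // positive inputs yield +tanh(|x|) and negative inputs keep tanh(-|x|).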
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy89AB = _mm_or_ps(_mm_andnot_ps(vm89AB, vy89AB), _mm_and_ps(vminus_one, vm89AB));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 8,915
| 38.982063
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr1-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr1_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy89AB = _mm_or_ps(_mm_andnot_ps(vm89AB, vy89AB), _mm_and_ps(vminus_one, vm89AB));
vyCDEF = _mm_or_ps(_mm_andnot_ps(vmCDEF, vyCDEF), _mm_and_ps(vminus_one, vmCDEF));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
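    // 1-3 elements remain: a full vector is loaded (the kernel is declared
    // XNN_OOB_READS, so reading past the buffer is permitted) and only the valid
    // lanes are stored below.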
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,359
| 41.113821
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr1-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr1_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,473
| 32.140741
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr1-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr1_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,468
| 36.345
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr2-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr2_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
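    // Second Newton-Raphson step, r <- r * (r*vepo + 2): the first step already flipped
    // the sign of the reciprocal, so this one keeps it and squares the remaining relative
    // error, bringing vemo / (vemo + 2) close to full float precision.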
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy89AB = _mm_or_ps(_mm_andnot_ps(vm89AB, vy89AB), _mm_and_ps(vminus_one, vm89AB));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 9,363
| 40.070175
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr2_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
const __m128 vm89AB = _mm_cmple_ps(vz89AB, vsat_cutoff);
const __m128 vmCDEF = _mm_cmple_ps(vzCDEF, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
const __m128 vepo89AB = _mm_sub_ps(vminus_two, vemo89AB);
const __m128 vepoCDEF = _mm_sub_ps(vminus_two, vemoCDEF);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
__m128 vrepo89AB = _mm_rcp_ps(vepo89AB);
__m128 vrepoCDEF = _mm_rcp_ps(vepoCDEF);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_add_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_add_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo89AB = _mm_mul_ps(vrepo89AB, _mm_sub_ps(_mm_mul_ps(vrepo89AB, vepo89AB), vminus_two));
vrepoCDEF = _mm_mul_ps(vrepoCDEF, _mm_sub_ps(_mm_mul_ps(vrepoCDEF, vepoCDEF), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
__m128 vy89AB = _mm_mul_ps(vemo89AB, vrepo89AB);
__m128 vyCDEF = _mm_mul_ps(vemoCDEF, vrepoCDEF);
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy89AB = _mm_or_ps(_mm_andnot_ps(vm89AB, vy89AB), _mm_and_ps(vminus_one, vm89AB));
vyCDEF = _mm_or_ps(_mm_andnot_ps(vmCDEF, vyCDEF), _mm_and_ps(vminus_one, vmCDEF));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,903
| 42.269841
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr2_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,633
| 32.824818
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse2-expm1minus-rr1-p6h5ts-nr2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse2_expm1minus_rr1_p6h5ts_nr2_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vm0123 = _mm_cmple_ps(vz0123, vsat_cutoff);
const __m128 vm4567 = _mm_cmple_ps(vz4567, vsat_cutoff);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
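    // Reassemble expm1(2z) = p*(t*s) + (s - 1); vsmo is s - 1, computed as s + (-1).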
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
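    // vepo holds the negated denominator -(expm1(2z) + 2). _mm_rcp_ps gives a ~12-bit
    // reciprocal estimate; the first step r*(r*vepo - 2) refines it and flips its sign,
    // and the second step r*(r*vepo + 2) == r*(2 - r*(expm1(2z) + 2)) is a plain
    // Newton-Raphson update, so vminus_two doubles as the +/-2 constant in both steps.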
const __m128 vepo0123 = _mm_sub_ps(vminus_two, vemo0123);
const __m128 vepo4567 = _mm_sub_ps(vminus_two, vemo4567);
__m128 vrepo0123 = _mm_rcp_ps(vepo0123);
__m128 vrepo4567 = _mm_rcp_ps(vepo4567);
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_add_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_add_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
vrepo0123 = _mm_mul_ps(vrepo0123, _mm_sub_ps(_mm_mul_ps(vrepo0123, vepo0123), vminus_two));
vrepo4567 = _mm_mul_ps(vrepo4567, _mm_sub_ps(_mm_mul_ps(vrepo4567, vepo4567), vminus_two));
__m128 vy0123 = _mm_mul_ps(vemo0123, vrepo0123);
__m128 vy4567 = _mm_mul_ps(vemo4567, vrepo4567);
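    // Saturated lanes are overwritten with exactly -1, then the XOR with vinvsignx
    // restores the sign of the original input.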
vy0123 = _mm_or_ps(_mm_andnot_ps(vm0123, vy0123), _mm_and_ps(vminus_one, vm0123));
vy4567 = _mm_or_ps(_mm_andnot_ps(vm4567, vy4567), _mm_and_ps(vminus_one, vm4567));
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
__m128 vy = _mm_mul_ps(vemo, vrepo);
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,820
| 37.338235
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
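    // Unlike the nr2 kernel above, which blends -1 into saturated lanes at the end,
    // this variant clamps z at the saturation cutoff up front, so large-magnitude
    // inputs simply evaluate to the saturated tanh value and no later select is needed.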
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
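    // vn carries its exponent with 3 fractional bits: the low 3 bits (vindex_mask)
    // pick a table entry holding 2**(k/8) with k << 20 already subtracted, while
    // vn << 20 drops the integer part straight into the float exponent field; adding
    // the two rebuilds vs = 2**vn. The #if below only changes how the indices reach
    // scalar registers for the table loads (pairs of 64-bit extracts on x86-64,
    // per-lane extracts otherwise).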
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], 1);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23], 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], 3);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], 3);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], 3);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx5], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidx9], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx6], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxA], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx7], 3);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxB], 3);
#endif
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
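    // vepo = vemo + 2 is the denominator of tanh(z) = expm1(2z) / (expm1(2z) + 2);
    // this div variant computes the quotient with a full-precision division instead
    // of a reciprocal approximation.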
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 14,610
| 45.680511
| 111
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
const __m128i veCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 20);
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
__m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
vidxCDEF = _mm_unpackhi_epi64(vidxCDEF, vidxCDEF);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], 1);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23], 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], 3);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], 3);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], 3);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)], 3);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const __m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxC]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx5], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidx9], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxD], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx6], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxA], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxE], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx7], 3);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxB], 3);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxF], 3);
#endif
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, veCDEF));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc4, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
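    // Only 1-3 elements remain, but a full 4-lane load is still issued: the kernel is
    // declared XNN_OOB_READS, so reading past the end of the input is permitted, and
    // only the valid lanes are stored below.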
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 17,148
| 48.137536
| 111
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
vzGHIJ = _mm_max_ps(vsat_cutoff, vzGHIJ);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
const __m128i veCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 20);
const __m128i veGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 20);
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
__m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
__m128i vidxGHIJ = _mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
vidxCDEF = _mm_unpackhi_epi64(vidxCDEF, vidxCDEF);
const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
vidxGHIJ = _mm_unpackhi_epi64(vidxGHIJ, vidxGHIJ);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
const uint64_t vidxIJ = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], 1);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23], 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], 3);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], 3);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], 3);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)], 3);
__m128i vlGHIJ = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxGH]);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxGH >> 32)], 1);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxIJ], 2);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxIJ >> 32)], 3);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const __m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const __m128i vidxGHIJ = _mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxC]);
__m128i vlGHIJ = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxG]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx5], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidx9], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxD], 1);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxH], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx6], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxA], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxE], 2);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxI], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx7], 3);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxB], 3);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxF], 3);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxJ], 3);
#endif
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, veCDEF));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, veGHIJ));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc4, vtCDEF), vc3);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc4, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
const __m128 vepoGHIJ = _mm_sub_ps(vemoGHIJ, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
__m128 vyGHIJ = _mm_div_ps(vemoGHIJ, vepoGHIJ);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
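  // The main loop advances 20 elements at a time, so up to four full vectors of
  // leftovers are handled here before the final 1-3 element tail.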
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 19,686
| 50.135065
| 111
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
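    // Vector suffixes name the lanes they cover: digits 0-9 and letters A-N label the
    // 24 positions of this unroll.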
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
__m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vinvsignxKLMN = _mm_xor_ps(vxKLMN, vzKLMN);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
vzGHIJ = _mm_max_ps(vsat_cutoff, vzGHIJ);
vzKLMN = _mm_max_ps(vsat_cutoff, vzKLMN);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
const __m128i ve89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 20);
const __m128i veCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 20);
const __m128i veGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 20);
const __m128i veKLMN = _mm_slli_epi32(_mm_castps_si128(vnKLMN), 20);
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
__m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
__m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
__m128i vidxGHIJ = _mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask);
__m128i vidxKLMN = _mm_and_si128(_mm_castps_si128(vnKLMN), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
vidx89AB = _mm_unpackhi_epi64(vidx89AB, vidx89AB);
const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
vidxCDEF = _mm_unpackhi_epi64(vidxCDEF, vidxCDEF);
const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
vidxGHIJ = _mm_unpackhi_epi64(vidxGHIJ, vidxGHIJ);
const uint64_t vidxKL = (uint64_t) _mm_cvtsi128_si64(vidxKLMN);
vidxKLMN = _mm_unpackhi_epi64(vidxKLMN, vidxKLMN);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
const uint64_t vidxIJ = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
const uint64_t vidxMN = (uint64_t) _mm_cvtsi128_si64(vidxKLMN);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], 1);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23], 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], 3);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], 3);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx89]);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx89 >> 32)], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxAB], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxAB >> 32)], 3);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxCD]);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxCD >> 32)], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxEF], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxEF >> 32)], 3);
__m128i vlGHIJ = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxGH]);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxGH >> 32)], 1);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxIJ], 2);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxIJ >> 32)], 3);
__m128i vlKLMN = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxKL]);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxKL >> 32)], 1);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidxMN], 2);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidxMN >> 32)], 3);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const __m128i vidx89AB = _mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask);
const __m128i vidxCDEF = _mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask);
const __m128i vidxGHIJ = _mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask);
const __m128i vidxKLMN = _mm_and_si128(_mm_castps_si128(vnKLMN), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
const uint32_t vidxK = (uint32_t) _mm_cvtsi128_si32(vidxKLMN);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
__m128i vl89AB = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx8]);
__m128i vlCDEF = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxC]);
__m128i vlGHIJ = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxG]);
__m128i vlKLMN = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidxK]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
const uint32_t vidxL = (uint32_t) _mm_extract_epi16(vidxKLMN, 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx5], 1);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidx9], 1);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxD], 1);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxH], 1);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[vidxL], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
const uint32_t vidxM = (uint32_t) _mm_extract_epi16(vidxKLMN, 4);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx6], 2);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxA], 2);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxE], 2);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxI], 2);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[vidxM], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
const uint32_t vidxN = (uint32_t) _mm_extract_epi16(vidxKLMN, 6);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx7], 3);
vl89AB = _mm_insert_epi32(vl89AB, (int) xnn_table_exp2minus_k_over_8[vidxB], 3);
vlCDEF = _mm_insert_epi32(vlCDEF, (int) xnn_table_exp2minus_k_over_8[vidxF], 3);
vlGHIJ = _mm_insert_epi32(vlGHIJ, (int) xnn_table_exp2minus_k_over_8[vidxJ], 3);
vlKLMN = _mm_insert_epi32(vlKLMN, (int) xnn_table_exp2minus_k_over_8[vidxN], 3);
#endif
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
const __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ve89AB));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, veCDEF));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, veGHIJ));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_add_epi32(vlKLMN, veKLMN));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
const __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2), vzKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc4, vt89AB), vc3);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc4, vtCDEF), vc3);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc4, vtGHIJ), vc3);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc4, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
vpKLMN = _mm_sub_ps(_mm_mul_ps(vpKLMN, vtKLMN), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vtsKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
const __m128 vsmoKLMN = _mm_add_ps(vsKLMN, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vemoKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtsKLMN), vsmoKLMN);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
const __m128 vepoGHIJ = _mm_sub_ps(vemoGHIJ, vminus_two);
const __m128 vepoKLMN = _mm_sub_ps(vemoKLMN, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
__m128 vyGHIJ = _mm_div_ps(vemoGHIJ, vepoGHIJ);
__m128 vyKLMN = _mm_div_ps(vemoKLMN, vepoKLMN);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
vyKLMN = _mm_xor_ps(vyKLMN, vinvsignxKLMN);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
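// -----------------------------------------------------------------------------
// Illustrative scalar reference (an assumption added for exposition; it is not
// part of the generated kernel above, and the helper name is made up): the
// identity that the expm1minus-based tanh kernels in this directory vectorize.
// With z = -|x| the exponential never overflows, and tanh(x) carries the sign
// of x with magnitude -expm1(2*z) / (expm1(2*z) + 2).  The kernels replace the
// libm expm1f call below with a range reduction plus table lookup and/or
// polynomial.
// -----------------------------------------------------------------------------
#include <math.h>

static float tanh_expm1_scalar_reference(float x) {
  const float z = -fabsf(x);           // z <= 0
  const float emo = expm1f(2.0f * z);  // e^(2z) - 1, in (-1, 0]
  const float y = emo / (emo + 2.0f);  // tanh(z) = -tanh(|x|)
  return copysignf(-y, x);             // restore the sign of x
}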
| file_length: 22,224 | avg_line_length: 51.790974 | max_line_length: 111 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
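// Illustrative sketch (an assumption for exposition, not part of the generated
// code): how the kernel below turns this table into s ~= exp2(n).  After the
// magic-bias trick, the low 3 bits of vn's bit pattern (selected by
// index_mask, assumed here to be 7 for the LUT8 variant) index the table, and
// the remaining bits, shifted left by 20, land in the exponent field.  Since
// each table entry already has (k << 20) subtracted out (see the comment
// above), a single integer add recombines the two parts.  In scalar form, with
// hypothetical float_as_uint32/uint32_as_float helpers:
//
//   const uint32_t nbits = float_as_uint32(vn);
//   const uint32_t sbits = xnn_table_exp2minus_k_over_8[nbits & 7] + (nbits << 20);
//   const float s = uint32_as_float(sbits);  // s ~= exp2 of the rounded exponent held in vn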
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
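// Illustrative note (an assumption for exposition, not part of the generated
// code): the sign handling used by the kernel above.  Assuming sign_mask holds
// the bit pattern 0x80000000, OR-ing it into x gives z = -|x|, so the rest of
// the computation only ever sees non-positive arguments.  XOR-ing x with z
// isolates the single bit that changed: zero when x was already negative, the
// sign bit when x was non-negative.  XOR-ing that value into y = tanh(z) at
// the end flips the sign exactly when needed:
//
//   x >= 0:  z = -x, y = tanh(-x) = -tanh(x), flip bit set   -> output tanh(x)
//   x <  0:  z =  x, y = tanh(x),             flip bit clear -> output tanh(x)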
| file_length: 6,829 | avg_line_length: 38.94152 | max_line_length: 111 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-lut8-p4h3ts-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_lut8_p4h3ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m128i vindex_mask = _mm_load_si128((const __m128i*) params->sse_expm1minus_rr1_lut8_p4h3.index_mask);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_ln2);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_lut8_p4h3.minus_one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128i ve0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 20);
const __m128i ve4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 20);
#if XNN_ARCH_X86_64
__m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
__m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
vidx0123 = _mm_unpackhi_epi64(vidx0123, vidx0123);
const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
vidx4567 = _mm_unpackhi_epi64(vidx4567, vidx4567);
const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx01]);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx01 >> 32)], 1);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx23], 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx23 >> 32)], 3);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx45]);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx45 >> 32)], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx67], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx67 >> 32)], 3);
#else
const __m128i vidx0123 = _mm_and_si128(_mm_castps_si128(vn0123), vindex_mask);
const __m128i vidx4567 = _mm_and_si128(_mm_castps_si128(vn4567), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
__m128i vl0123 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
__m128i vl4567 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx4]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx5], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx6], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
vl0123 = _mm_insert_epi32(vl0123, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
vl4567 = _mm_insert_epi32(vl4567, (int) xnn_table_exp2minus_k_over_8[vidx7], 3);
#endif
const __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ve0123));
const __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ve4567));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc4, vt0123), vc3);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc4, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)], 1);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi], 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)], 3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
__m128i vl = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx1], 1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx2], 2);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
vl = _mm_insert_epi32(vl, (int) xnn_table_exp2minus_k_over_8[vidx3], 3);
#endif
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
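// Illustrative note (an assumption for exposition, not part of the generated
// code): how the 1-3 element remainder is handled by the kernels above.  The
// tail still issues a full 4-float load -- the kernels carry the XNN_OOB_READS
// annotation, i.e. reading a few bytes past the end of the input is treated as
// acceptable -- but the stores never overrun the output:
//
//   if (batch & (2 * sizeof(float)))  write lanes 0-1 with _mm_storel_pi,
//                                     then shift lanes 2-3 down with _mm_movehl_ps;
//   if (batch & (1 * sizeof(float)))  write the remaining lane with _mm_store_ss.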
| file_length: 12,069 | avg_line_length: 42.574007 | max_line_length: 111 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
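// Illustrative sketch (an assumption for exposition, not part of the generated
// code): the exponent reconstruction in the p6h5 kernels.  Adding magic_bias
// forces n = z * log2e to be rounded so that its integer part ends up in the
// low mantissa bits of vn; shifting that bit pattern left by 23 moves it into
// the exponent field, and subtracting magic_bias back recovers the rounded n
// for the residual t.  In scalar form, with hypothetical
// float_as_uint32/uint32_as_float helpers:
//
//   float n = z * log2e + magic_bias;                            // round via the magic bias
//   const float s = uint32_as_float(float_as_uint32(n) << 23);   // s ~= exp2(n - magic_bias)
//   n -= magic_bias;                                             // rounded n
//   const float t = n * minus_ln2 + z;                           // reduced argument for the polynomial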
| file_length: 7,743 | avg_line_length: 36.77561 | max_line_length: 89 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
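// Illustrative note (an assumption for exposition, not part of the generated
// code): the role of sat_cutoff in the kernels above.  Clamping with
// vz = _mm_max_ps(vsat_cutoff, vz) bounds how negative z = -|x| can get, so
// every input at or beyond the cutoff is evaluated at the cutoff itself and
// produces the same saturated quotient emo / (emo + 2); for a suitably chosen
// cutoff that quotient rounds to -1, and the sign restoration then yields the
// expected +/-1 for large |x|.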
| file_length: 8,937 | avg_line_length: 38.724444 | max_line_length: 89 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
vzGHIJ = _mm_max_ps(vsat_cutoff, vzGHIJ);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
const __m128 vepoGHIJ = _mm_sub_ps(vemoGHIJ, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
__m128 vyGHIJ = _mm_div_ps(vemoGHIJ, vepoGHIJ);
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
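// Remainder of 1 to 3 elements: compute one full vector (the kernel is marked XNN_OOB_READS,
// so reading a few elements past the end of the input is tolerated) and store only the valid lanes.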
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,131
| 40.355102
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
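// This kernel evaluates tanh(x) on the non-positive half-line as
//   tanh(z) = expm1(2z) / (expm1(2z) + 2),  with z = -|x|,
// and restores the sign of x at the end. expm1(2z) itself is reconstructed as
// (s - 1) + (t * s) * p(t) with s = 2**n, where n rounds z to a multiple of the reduction
// step (the exact scaling is presumably carried by the log2e/minus_ln2 parameters) and the
// degree-6 polynomial p approximates the remaining residual t.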
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
__m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
__m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
__m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
const __m128 vinvsignx0123 = _mm_xor_ps(vx0123, vz0123);
const __m128 vinvsignx4567 = _mm_xor_ps(vx4567, vz4567);
const __m128 vinvsignx89AB = _mm_xor_ps(vx89AB, vz89AB);
const __m128 vinvsignxCDEF = _mm_xor_ps(vxCDEF, vzCDEF);
const __m128 vinvsignxGHIJ = _mm_xor_ps(vxGHIJ, vzGHIJ);
const __m128 vinvsignxKLMN = _mm_xor_ps(vxKLMN, vzKLMN);
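// Clamp z = -|x| from below at sat_cutoff: beyond this point tanh saturates to +-1 within
// single-precision accuracy, so the clamped value already yields the saturated result.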
vz0123 = _mm_max_ps(vsat_cutoff, vz0123);
vz4567 = _mm_max_ps(vsat_cutoff, vz4567);
vz89AB = _mm_max_ps(vsat_cutoff, vz89AB);
vzCDEF = _mm_max_ps(vsat_cutoff, vzCDEF);
vzGHIJ = _mm_max_ps(vsat_cutoff, vzGHIJ);
vzKLMN = _mm_max_ps(vsat_cutoff, vzKLMN);
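// Round z to the nearest reduction step with the magic-bias trick: after adding vmagic_bias,
// the integer n sits in the low mantissa bits, so shifting them into the exponent field
// (below) reconstructs s = 2**n directly; the bias value is presumably chosen so that no
// separate exponent-bias adjustment is needed.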
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
const __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2), vz0123);
const __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2), vz4567);
const __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2), vz89AB);
const __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2), vzCDEF);
const __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2), vzGHIJ);
const __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2), vzKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_sub_ps(_mm_mul_ps(vp0123, vt0123), vminus_two);
vp4567 = _mm_sub_ps(_mm_mul_ps(vp4567, vt4567), vminus_two);
vp89AB = _mm_sub_ps(_mm_mul_ps(vp89AB, vt89AB), vminus_two);
vpCDEF = _mm_sub_ps(_mm_mul_ps(vpCDEF, vtCDEF), vminus_two);
vpGHIJ = _mm_sub_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vminus_two);
vpKLMN = _mm_sub_ps(_mm_mul_ps(vpKLMN, vtKLMN), vminus_two);
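// Reconstruct expm1 from the reduced pieces: emo = (s - 1) + (t * s) * p(t).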
const __m128 vts0123 = _mm_mul_ps(vt0123, vs0123);
const __m128 vsmo0123 = _mm_add_ps(vs0123, vminus_one);
const __m128 vts4567 = _mm_mul_ps(vt4567, vs4567);
const __m128 vsmo4567 = _mm_add_ps(vs4567, vminus_one);
const __m128 vts89AB = _mm_mul_ps(vt89AB, vs89AB);
const __m128 vsmo89AB = _mm_add_ps(vs89AB, vminus_one);
const __m128 vtsCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
const __m128 vsmoCDEF = _mm_add_ps(vsCDEF, vminus_one);
const __m128 vtsGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
const __m128 vsmoGHIJ = _mm_add_ps(vsGHIJ, vminus_one);
const __m128 vtsKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
const __m128 vsmoKLMN = _mm_add_ps(vsKLMN, vminus_one);
const __m128 vemo0123 = _mm_add_ps(_mm_mul_ps(vp0123, vts0123), vsmo0123);
const __m128 vemo4567 = _mm_add_ps(_mm_mul_ps(vp4567, vts4567), vsmo4567);
const __m128 vemo89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vts89AB), vsmo89AB);
const __m128 vemoCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtsCDEF), vsmoCDEF);
const __m128 vemoGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtsGHIJ), vsmoGHIJ);
const __m128 vemoKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtsKLMN), vsmoKLMN);
const __m128 vepo0123 = _mm_sub_ps(vemo0123, vminus_two);
const __m128 vepo4567 = _mm_sub_ps(vemo4567, vminus_two);
const __m128 vepo89AB = _mm_sub_ps(vemo89AB, vminus_two);
const __m128 vepoCDEF = _mm_sub_ps(vemoCDEF, vminus_two);
const __m128 vepoGHIJ = _mm_sub_ps(vemoGHIJ, vminus_two);
const __m128 vepoKLMN = _mm_sub_ps(vemoKLMN, vminus_two);
__m128 vy0123 = _mm_div_ps(vemo0123, vepo0123);
__m128 vy4567 = _mm_div_ps(vemo4567, vepo4567);
__m128 vy89AB = _mm_div_ps(vemo89AB, vepo89AB);
__m128 vyCDEF = _mm_div_ps(vemoCDEF, vepoCDEF);
__m128 vyGHIJ = _mm_div_ps(vemoGHIJ, vepoGHIJ);
__m128 vyKLMN = _mm_div_ps(vemoKLMN, vepoKLMN);
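// Restore the sign of the original input: vinvsignx carries the sign-flip bit saved earlier.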
vy0123 = _mm_xor_ps(vy0123, vinvsignx0123);
vy4567 = _mm_xor_ps(vy4567, vinvsignx4567);
vy89AB = _mm_xor_ps(vy89AB, vinvsignx89AB);
vyCDEF = _mm_xor_ps(vyCDEF, vinvsignxCDEF);
vyGHIJ = _mm_xor_ps(vyGHIJ, vinvsignxGHIJ);
vyKLMN = _mm_xor_ps(vyKLMN, vinvsignxKLMN);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 11,325
| 41.739623
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-sse41-expm1minus-rr1-p6h5ts-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__sse41_expm1minus_rr1_p6h5ts_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sign_mask);
const __m128 vsat_cutoff = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.sat_cutoff);
const __m128 vlog2e = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.magic_bias);
const __m128 vminus_ln2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_ln2);
const __m128 vc6 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c6);
const __m128 vc5 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c5);
const __m128 vc4 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c4);
const __m128 vc3 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c3);
const __m128 vc2 = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.c2);
const __m128 vminus_two = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_two);
const __m128 vminus_one = _mm_load_ps(params->sse_expm1minus_rr1_p6h5.minus_one);
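// Same expm1(2z) / (expm1(2z) + 2) evaluation as the wider variants, processing 4 elements per iteration.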
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vz = _mm_or_ps(vx, vsign_mask);
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
vz = _mm_max_ps(vsat_cutoff, vz);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
const __m128 vepo = _mm_sub_ps(vemo, vminus_two);
__m128 vy = _mm_div_ps(vemo, vepo);
vy = _mm_xor_ps(vy, vinvsignx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,052
| 30.913386
| 87
|
c
|