Dataset schema: repo (string, 1–152 chars, nullable) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes)
repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x40.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
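// Computes y = tanh(x) elementwise. Working with z = -|x|, the kernel
// evaluates expm1(2*z) via a magic-bias range reduction and a degree-6
// polynomial, forms tanh(z) = expm1(2*z) / (expm1(2*z) + 2), and then
// restores the sign of x. The main loop is unrolled to 40 elements
// (five 8-lane AVX2 vectors).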
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
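// z := -|x| (force the sign bit), so the exponential argument is non-positive;
// vinvsignx records whether the sign must be flipped back at the end (tanh is odd).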
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
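// n := z*log2e rounded to the nearest integer, via the magic-bias trick:
// adding a large constant leaves the integer in the low mantissa bits.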
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
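// s := 2^n, built by shifting the low mantissa bits of n into the exponent
// field; the magic bias is then subtracted to recover n as a float.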
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
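// t := n*(-ln2) + z, the remainder of the range reduction.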
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
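// Evaluate p(t) = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t + 2 by Horner's rule.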
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
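// Reconstruct emo := expm1(2*z) = p(t)*(t*s) + (s - 1).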
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
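// epo := emo + 2, so tanh(z) = emo/epo; the final XOR restores the sign of x.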
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
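// Remainder loop: one full 8-element vector at a time.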
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
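// Tail of 1..7 elements: masked load, then piecewise 4/2/1-element stores.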
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
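For reference, the identity that every kernel in this family evaluates fits in a few scalar lines. The sketch below is hypothetical (tanh_expm1minus_ref is not an XNNPACK function) and leans on libm's expm1f where the vector kernels substitute their own magic-bias range reduction and degree-6 polynomial, whose coefficients live in params and are not reproduced here:

#include <math.h>

// Scalar reference for the expm1minus tanh evaluation (assumption: this
// mirrors the vector kernels above, with libm standing in for the
// range-reduced polynomial approximation of expm1).
static float tanh_expm1minus_ref(float x) {
  const float z = -fabsf(x);           // z = -|x|, so expm1(2*z) is in (-1, 0]
  const float emo = expm1f(2.0f * z);  // emo = exp(2*z) - 1
  const float y = emo / (emo + 2.0f);  // tanh(z) = (e^{2z} - 1) / (e^{2z} + 1)
  return copysignf(y, x);              // tanh is odd: restore the sign of x
}

The saturation clamp in the vector code (vz = max(vsat_cutoff, vz)) has no scalar counterpart here: it pins z for large |x|, where tanh has already saturated to ±1 in single precision.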
file_length: 9,759 | avg_line_length: 37.577075 | max_line_length: 132 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x48.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
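// Identical algorithm to the x40 kernel above; the main loop is unrolled to
// 48 elements (six AVX2 vectors).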
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
file_length: 10,795 | avg_line_length: 38.40146 | max_line_length: 132 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x56.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
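// Identical algorithm; main loop unrolled to 56 elements (seven vectors).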
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
file_length: 11,831 | avg_line_length: 39.108475 | max_line_length: 132 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x64.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
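// Identical algorithm; main loop unrolled to 64 elements (eight vectors).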
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
file_length: 12,867 | avg_line_length: 39.721519 | max_line_length: 132 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x72.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
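// Identical algorithm; main loop unrolled to 72 elements (nine vectors).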
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
file_length: 13,903 | avg_line_length: 40.25816 | max_line_length: 132 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
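// Identical algorithm with no extra unrolling: one 8-element vector per
// main-loop iteration.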
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
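  // Algorithm: z = -|x| (sign bit forced on), clamped below at sat_cutoff,
  // beyond which tanh saturates to -1. The loop computes vemo ~= expm1(2*z)
  // via exponent extraction plus a polynomial, then reconstructs
  //   tanh(z) = expm1(2*z) / (expm1(2*z) + 2)
  // and restores the sign of x with a final XOR against vinvsignx.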
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
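    // Round z*log2e to an integer n with the magic-bias trick: the rounded
    // value lands in the low mantissa bits of vn, so shifting them into the
    // exponent field builds vs = 2**n. Subtracting the bias recovers n, and
    // vt = n*minus_ln2 + z is the small reduced argument.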
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
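    // Horner evaluation (one FMA per step) of the polynomial part of the
    // degree-6 expm1 approximation ("p6" in the kernel name); the constant
    // term is 2.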
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
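    // Reconstruct: vemo = vp*(vt*vs) + (vs - 1) ~= expm1(2*z) and
    // vepo = vemo + 2 ~= exp(2*z) + 1, so vemo/vepo = tanh(z); XOR-ing with
    // vinvsignx flips the sign for positive x, yielding tanh(x).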
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
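A minimal calling sketch, shown for reference only (it is not part of the
generated sources). The initializer name below is an assumption based on
XNNPACK's usual xnn_init_*_params naming for the avx_expm1minus_rr1_p6h5
member of the union; production code normally reaches these microkernels
through the operator API rather than calling them directly.

#include <stddef.h>

#include <xnnpack/microparams.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/vunary.h>

void tanh_f32_example(const float* x, float* y, size_t n) {
  union xnn_f32_tanh_params params;
  // Hypothetical helper; the exact initializer for this variant may differ.
  xnn_init_f32_tanh_avx_expm1minus_rr1_p6h5_params(&params);
  // The batch argument is in bytes and must be a non-zero multiple of
  // sizeof(float); the kernel handles any 1..7-element tail internally.
  xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x8(
      n * sizeof(float), x, y, &params);
}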
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-div-x80.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
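  // Main loop: 10 vectors (80 floats) per iteration; the math is identical
  // to the x8 variant above, unrolled to overlap FMA and divide latency.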
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
vz9 = _mm256_max_ps(vsat_cutoff, vz9);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
__m256 vy9 = _mm256_div_ps(vemo9, vepo9);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
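  // Drain any remaining full vectors of 8 floats one at a time.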
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
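  // "nr1" variant: same expm1minus computation as the div kernels above,
  // but the division is replaced by _mm256_rcp_ps plus one Newton-Raphson
  // step. Since the approximate reciprocal cannot guarantee an exactly
  // saturated result, vz is not clamped; instead a mask vm of lanes with
  // z <= sat_cutoff is kept and those outputs are blended to exactly -1.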
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
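    // One Newton-Raphson step refines the ~12-bit _mm256_rcp_ps estimate r
    // of 1/vepo: verepo = -(r*vepo) - (-1) = 1 - r*vepo is the residual,
    // and r <- r*verepo + r = r*(2 - r*vepo) roughly doubles the number of
    // correct bits.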
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
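    // Force exactly -1 on saturated lanes (vm) before restoring the sign.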
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
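  // Same nr1 algorithm as the x16 kernel, unrolled 3x (24 floats per
  // iteration).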
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x32.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
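  // Same nr1 algorithm, unrolled 4x (32 floats per iteration).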
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x40.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
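  // Same nr1 algorithm, unrolled 5x (40 floats per iteration).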
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x48.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
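  // Same nr1 algorithm, unrolled 6x (48 floats per iteration).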
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
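    // Reconstruct expm1(2z) = (s - 1) + (t * s) * p(t), then
    // epo = expm1(2z) + 2 = exp(2z) + 1.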
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
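    // Reciprocal of epo: take the RCP estimate and apply one Newton-Raphson
    // step, vrepo += vrepo * (1 - vrepo * vepo).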
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
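    // y = emo * repo = tanh(-|x|); saturate inputs past the cutoff to
    // tanh = -1, then XOR the recorded sign bits back in.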
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
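  // Process any remaining full blocks of 8 elements.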
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
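  // Handle the final 1..7 elements with a masked load and piecewise stores.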
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,785 | 39.980769 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
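  // Same evaluation as the 48-element kernel above, unrolled to 56 elements
  // (7 AVX2 vectors) per main-loop iteration.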
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 14,072 | 40.759644 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
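  // Same evaluation again, unrolled to 64 elements (8 AVX2 vectors) per
  // main-loop iteration.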
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 15,359 | 41.430939 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x72.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
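  // Same evaluation, unrolled to 72 elements (9 AVX2 vectors) per iteration.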
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 16,646 | 42.015504 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
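  // Single-vector variant: 8 elements per iteration, no unrolling; the body
  // matches the per-vector computation of the unrolled kernels above.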
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,953 | 33.165517 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
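  // Same evaluation, unrolled to 80 elements (10 AVX2 vectors) per
  // main-loop iteration.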
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
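// vsign_mask sets the sign bit, so each vz = -|x|; XOR-ing x back against
// z records which lanes were negated so the sign can be restored after the
// rational evaluation.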
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
const __m256 vm9 = _mm256_cmp_ps(vz9, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
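// Magic-bias trick: after the fma, the low bits of vn hold the integer
// n = round(z * log2e); shifting those bits into the exponent field
// reconstructs vs = 2^n without a convert instruction.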
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
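// Horner evaluation of p(t) = 2 + t*(c2 + t*(c3 + t*(c4 + t*(c5 + t*c6))));
// t*p(t) approximates expm1 of the reduced argument.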
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
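// epo = expm1(2z) + 2 = exp(2z) + 1 is the denominator of tanh; its
// reciprocal is seeded with _mm256_rcp_ps and sharpened below by one
// Newton-Raphson iteration (the "nr1" in this kernel's name).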
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vrepo9 = _mm256_rcp_ps(vepo9);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
const __m256 verepo9 = _mm256_fnmsub_ps(vrepo9, vepo9, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm256_fmadd_ps(verepo9, vrepo9, vrepo9);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
__m256 vy9 = _mm256_mul_ps(vemo9, vrepo9);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
vy9 = _mm256_blendv_ps(vy9, vminus_one, vm9);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));

const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 17,933
| 42.529126
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
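// Usage sketch (hypothetical caller): the params struct is expected to be
// initialized by the matching xnn_init_f32_tanh_*_params helper (exact
// helper name assumed, not verified here):
//   float in[16], out[16];
//   xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x16(
//       16 * sizeof(float), in, out, &tanh_params);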
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
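// Unlike the blend-based variants above, this kernel saturates by clamping
// z to sat_cutoff up front: the rational form evaluated at the clamped
// value is designed to round to -1, so no separate mask/blend is needed.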
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
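// "adj" step: compute the residual ey = emo - y*epo of the division and
// fold it back in as y += ey * (1/epo), one extra correction on the
// quotient for a final bit of accuracy.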
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,734
| 34.810185
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
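// Reconstruct expm1(2z) without cancellation: emo = (s - 1) + (t*s)*p(t),
// where s = 2^n carries the integer part of the reduction.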
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,047
| 36.38843
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
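// XOR the recorded sign back in: lanes where x was positive have the sign
// bit set in vinvsignx, flipping y = tanh(z) (with z = -|x|) to +tanh(|x|).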
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,360
| 37.660448
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
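// mask_table holds seven all-ones lanes followed by zeros; stepping 'batch'
// bytes back from its midpoint yields a mask with exactly batch/4 leading
// active lanes for the masked load of the remainder.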
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,673
| 38.707483
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
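// Six-vector unroll: independent accumulators help the long fma and
// reciprocal-refinement dependency chains overlap across iterations.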
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
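    // tanh(z) = emo / (emo + 2): start from the hardware reciprocal estimate,
    // refine it with one Newton-Raphson step, then apply a residual-correction
    // ("adjustment") step to the quotient.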
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
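  // Remainder loop: one 8-element vector at a time.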
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
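  // Final 1-7 elements: load them with a mask. Offsetting into mask_table is
  // presumably the usual XNNPACK trick of sliding a window over a table of
  // all-ones/all-zeros words so that exactly batch/sizeof(float) lanes are set.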
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
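    // Store 4, 2, then 1 element(s), according to the bits of the remaining count.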
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,986
| 39.584375
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
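  // Main loop: 56 elements (7 AVX vectors) per iteration.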
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
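    // z := -|x| (force the sign bit on); vinvsignx records whether the sign
    // must be flipped back at the end, and z is clamped to sat_cutoff, below
    // which tanh(z) is -1 to within single precision.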
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
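    // Range reduction: n := round(z * log2e) via the magic-bias trick;
    // s := 2**n is rebuilt by shifting n's low mantissa bits into the exponent.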
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
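    // t is the reduced argument; p(t) ~ (exp(2t) - 1) / t as a degree-6
    // polynomial in Horner form (note the trailing coefficient of 2).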
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
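    // expm1(2z) ~ (s - 1) + (t * s) * p(t); adding 2 gives exp(2z) + 1.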
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
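    // tanh(z) = emo / (emo + 2), computed as a reciprocal estimate plus one
    // Newton-Raphson step, followed by a residual-correction ("adjustment")
    // of the quotient.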
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 14,299
| 40.32948
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
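  // Main loop: 64 elements (8 AVX vectors) per iteration.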
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 15,612
| 40.97043
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
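  // Main loop: 72 elements (9 AVX vectors) per iteration.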
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
const __m256 vey8 = _mm256_fnmadd_ps(vy8, vepo8, vemo8);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm256_fmadd_ps(vey8, vrepo8, vy8);
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 16,925
| 41.527638
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
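  // A scalar sketch of the per-element computation, assuming (as the constant
  // names suggest) that log2e encodes 2/ln(2) and minus_ln2 encodes -ln(2)/2:
  //   z   = max(sat_cutoff, -|x|)
  //   n   = round(z * log2e);  s = 2**n       (magic bias + exponent shift)
  //   t   = z + n * minus_ln2                 (reduced argument)
  //   p   = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t + 2
  //   emo = (s - 1) + (t * s) * p             (~ expm1(2*z))
  //   y   = emo / (emo + 2)                   (~ tanh(z); rcp + NR + adjustment)
  //   tanh(x) = y, with the sign of x restored via XOR.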
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,998
| 33.006803
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx2-expm1minus-rr1-p6h5ts-nr1adj-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx2-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx2_expm1minus_rr1_p6h5ts_nr1adj_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p6h5.minus_one);
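  // Main loop: 80 elements (10 AVX vectors) per iteration.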
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
__m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
__m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
const __m256 vinvsignx0 = _mm256_xor_ps(vx0, vz0);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
const __m256 vinvsignx1 = _mm256_xor_ps(vx1, vz1);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
const __m256 vinvsignx2 = _mm256_xor_ps(vx2, vz2);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
const __m256 vinvsignx3 = _mm256_xor_ps(vx3, vz3);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
const __m256 vinvsignx4 = _mm256_xor_ps(vx4, vz4);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
const __m256 vinvsignx5 = _mm256_xor_ps(vx5, vz5);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
const __m256 vinvsignx6 = _mm256_xor_ps(vx6, vz6);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
const __m256 vinvsignx7 = _mm256_xor_ps(vx7, vz7);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
const __m256 vinvsignx8 = _mm256_xor_ps(vx8, vz8);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
const __m256 vinvsignx9 = _mm256_xor_ps(vx9, vz9);
vz9 = _mm256_max_ps(vsat_cutoff, vz9);
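    // Range reduction: n = round(z * log2e) via the magic-bias (round-to-nearest) trick.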
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
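    // s = 2^n: shift the integer bits of the biased n into the exponent field.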
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
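    // t = z - n * ln(2): the reduced argument of the polynomial.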
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
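    // Evaluate the degree-6 polynomial p(t) with Horner's scheme, ending in the +2 term.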
__m256 vp0 = vc6;
__m256 vp1 = vc6;
__m256 vp2 = vc6;
__m256 vp3 = vc6;
__m256 vp4 = vc6;
__m256 vp5 = vc6;
__m256 vp6 = vc6;
__m256 vp7 = vc6;
__m256 vp8 = vc6;
__m256 vp9 = vc6;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
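    // Reconstruct the exp-minus-one value: emo = p * (t*s) + (s - 1).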
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
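    // epo = emo + 2, so y = emo / epo follows the identity
    // tanh(z) = expm1(2z) / (expm1(2z) + 2) (the 2x factor is folded into the loaded constants).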
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
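    // Approximate 1/epo with RCPPS, then refine it with one Newton-Raphson step:
    // r += r * (1 - r*epo).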
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vrepo9 = _mm256_rcp_ps(vepo9);
const __m256 verepo0 = _mm256_fnmsub_ps(vrepo0, vepo0, vminus_one);
const __m256 verepo1 = _mm256_fnmsub_ps(vrepo1, vepo1, vminus_one);
const __m256 verepo2 = _mm256_fnmsub_ps(vrepo2, vepo2, vminus_one);
const __m256 verepo3 = _mm256_fnmsub_ps(vrepo3, vepo3, vminus_one);
const __m256 verepo4 = _mm256_fnmsub_ps(vrepo4, vepo4, vminus_one);
const __m256 verepo5 = _mm256_fnmsub_ps(vrepo5, vepo5, vminus_one);
const __m256 verepo6 = _mm256_fnmsub_ps(vrepo6, vepo6, vminus_one);
const __m256 verepo7 = _mm256_fnmsub_ps(vrepo7, vepo7, vminus_one);
const __m256 verepo8 = _mm256_fnmsub_ps(vrepo8, vepo8, vminus_one);
const __m256 verepo9 = _mm256_fnmsub_ps(vrepo9, vepo9, vminus_one);
vrepo0 = _mm256_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm256_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm256_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm256_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm256_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm256_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm256_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm256_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm256_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm256_fmadd_ps(verepo9, vrepo9, vrepo9);
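    // First quotient estimate: y = emo * (1/epo).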
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
__m256 vy9 = _mm256_mul_ps(vemo9, vrepo9);
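    // "adj" step: ey = emo - y*epo is the quotient residual; adding ey * (1/epo) sharpens y.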
const __m256 vey0 = _mm256_fnmadd_ps(vy0, vepo0, vemo0);
const __m256 vey1 = _mm256_fnmadd_ps(vy1, vepo1, vemo1);
const __m256 vey2 = _mm256_fnmadd_ps(vy2, vepo2, vemo2);
const __m256 vey3 = _mm256_fnmadd_ps(vy3, vepo3, vemo3);
const __m256 vey4 = _mm256_fnmadd_ps(vy4, vepo4, vemo4);
const __m256 vey5 = _mm256_fnmadd_ps(vy5, vepo5, vemo5);
const __m256 vey6 = _mm256_fnmadd_ps(vy6, vepo6, vemo6);
const __m256 vey7 = _mm256_fnmadd_ps(vy7, vepo7, vemo7);
const __m256 vey8 = _mm256_fnmadd_ps(vy8, vepo8, vemo8);
const __m256 vey9 = _mm256_fnmadd_ps(vy9, vepo9, vemo9);
vy0 = _mm256_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm256_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm256_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm256_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm256_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm256_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm256_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm256_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm256_fmadd_ps(vey8, vrepo8, vy8);
vy9 = _mm256_fmadd_ps(vey9, vrepo9, vy9);
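    // XOR the saved sign bit back in to restore tanh's odd symmetry.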
vy0 = _mm256_xor_ps(vy0, vinvsignx0);
vy1 = _mm256_xor_ps(vy1, vinvsignx1);
vy2 = _mm256_xor_ps(vy2, vinvsignx2);
vy3 = _mm256_xor_ps(vy3, vinvsignx3);
vy4 = _mm256_xor_ps(vy4, vinvsignx4);
vy5 = _mm256_xor_ps(vy5, vinvsignx5);
vy6 = _mm256_xor_ps(vy6, vinvsignx6);
vy7 = _mm256_xor_ps(vy7, vinvsignx7);
vy8 = _mm256_xor_ps(vy8, vinvsignx8);
vy9 = _mm256_xor_ps(vy9, vinvsignx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
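  // Remaining full 8-float vectors: same pipeline, one vector at a time.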
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
_mm256_storeu_ps(output, vy);
output += 8;
}
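  // Final 1-7 elements: masked load, then 4/2/1-float partial stores.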
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_expm1minus_rr1_p6h5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vz = _mm256_or_ps(vx, vsign_mask);
const __m256 vinvsignx = _mm256_xor_ps(vx, vz);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc6;
vp = _mm256_fmadd_ps(vp, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
const __m256 verepo = _mm256_fnmsub_ps(vrepo, vepo, vminus_one);
vrepo = _mm256_fmadd_ps(verepo, vrepo, vrepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
const __m256 vey = _mm256_fnmadd_ps(vy, vepo, vemo);
vy = _mm256_fmadd_ps(vey, vrepo, vy);
vy = _mm256_xor_ps(vy, vinvsignx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 18,238 | 42.016509 | 132 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
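    // z = min(|x|, sat_cutoff) with the sign cleared: _mm512_range_ps with imm8 0xA
    // selects the absolute minimum and forces a zero sign, clamping in one instruction.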
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
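    // n = z * (-log2e) + magic_bias: round so the low 2 mantissa bits index the 4-entry table.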
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
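    // e: shift the integer part of n past the 2 index bits into the floating-point exponent field.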
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
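    // Look up the fractional power of two from the 4-entry in-register table.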
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
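    // s = 2^n: combine the table value with the exponent bits; then undo the magic bias.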
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
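    // t = z + n * ln(2): the reduced argument (n <= 0 here, since z >= 0).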
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
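    // Degree-4 polynomial (Horner), ending in the -2 term of the expm1 expansion.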
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
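    // Reconstruct emo = p * (t*s) + (s - 1), i.e. expm1(-2z) with the factor of 2
    // folded into the loaded constants.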
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
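    // epo = emo - (-2) = emo + 2: the tanh denominator.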
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
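    // y = emo / epo = tanh(-z), computed with a full-precision division (the "div" variant).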
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
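    // Ternary-logic 0xD8 builds the result bitwise as (sign_mask ? x : y): sign from x, magnitude from y.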
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
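  // Remaining full 16-float vectors, one at a time.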
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 12,403 | 46.163498 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
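    // Same lut4-p4h3 pipeline as the x112 kernel, unrolled to 8 vectors of 16 floats.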
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 13,519 | 46.943262 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x144.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
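    // Same lut4-p4h3 pipeline, unrolled to 9 vectors of 16 floats.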
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512i vl8 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn8)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 14,635 | 47.624585 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
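    // Single-vector variant of the same lut4-p4h3 pipeline.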
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 4,493 | 37.410256 | 124 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x160.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
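  // Main loop: the x160 suffix means 160 floats (ten 16-lane ZMM vectors) per
  // iteration; a plain 16-wide loop and a masked remainder below handle leftovers.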
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 21);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512i vl8 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn8)));
const __m512i vl9 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn9)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
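    // The ten polynomial evaluations are interleaved one coefficient at a time
    // rather than nested, so the independent FMA chains can overlap in the
    // pipeline and hide FMA latency.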
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
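    // The "div" suffix marks this variant as using hardware division for
    // vemo/vepo; the sibling nr1adj kernels trade it for a Newton-Raphson
    // reciprocal with an adjustment step.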
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
__m512 vy9 = _mm512_div_ps(vemo9, vepo9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 15,751
| 48.225
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
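
// A minimal scalar sketch of the same expm1minus formulation, added here for
// illustration only (not produced by the generator). The helper name and the
// cutoff value are assumptions, not part of XNNPACK's API.
#include <math.h>
static inline float xnn_tanh_scalar_sketch(float x) {
  const float z = fminf(fabsf(x), 9.010913f);  // assumed |sat_cutoff| magnitude
  const float emo = expm1f(-2.0f * z);         // expm1(-2|x|), in [-1, 0]
  return copysignf(emo / (emo + 2.0f), x);     // |tanh(x)| with the sign of x
}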
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
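  // Same algorithm as the wider variants, unrolled x2 here; the three-tier loop
  // structure (unrolled, single-vector, masked tail) covers any batch size.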
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 6,828
| 39.64881
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
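    // vepo = vemo - (-2) reuses the vminus_two constant already live in a
    // register to compute vemo + 2, avoiding a separate +2.0f constant.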
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,942
| 41.475936
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
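    // _mm512_permutevar_ps indexes within each 128-bit lane using the low 2 bits
    // of each control element, so vtable presumably holds the 4-entry LUT
    // replicated across all four 128-bit lanes.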
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,056
| 42.966019
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
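    // rr1 in the kernel name likely denotes this single-constant reduction: one
    // fused multiply-add by vln2 recovers the remainder vt, instead of a
    // two-term ln2_hi/ln2_lo reduction.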
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,170
| 44.204444
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-div-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
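    // Horner evaluation of the polynomial: vp = ((vc4*vt + vc3)*vt + vc2)*vt +
    // vminus_two, computed across all six vectors one coefficient at a time.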
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
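All of the kernels in this dump share one reconstruction: tanh(x) is computed from expm1 of a negated, saturated argument, and the sign of x is reattached bitwise at the end. Below is a minimal scalar sketch of that identity, for exposition only; the function name is hypothetical, and the vector code above replaces expm1f with the LUT-plus-polynomial machinery and the division with VDIVPS (or with the Newton-Raphson variants in the files that follow).

// Scalar reference sketch (hypothetical helper, not part of XNNPACK's API).
#include <math.h>

static float tanhf_expm1minus_ref(float x) {
  const float z = -fabsf(x);           // z <= 0, so exp(2*z) cannot overflow
  const float emo = expm1f(2.0f * z);  // e^(2z) - 1, in (-1, 0]
  const float epo = emo + 2.0f;        // e^(2z) + 1
  const float y = emo / epo;           // tanh(z) = -tanh(|x|)
  return copysignf(y, x);              // |y| = tanh(|x|); reattach sign of x
}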
| 11,284 | 45.25 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
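The "nr1adj" kernels above avoid VDIVPS entirely: they start from the roughly 14-bit reciprocal of _mm512_rcp14_ps, refine it with one Newton-Raphson step ("nr1"), and then correct the final quotient with one residual step ("adj"). A scalar sketch of that sequence follows, assuming a caller-supplied reciprocal approximation rcp14 (a hypothetical stand-in for the hardware instruction):

static float div_nr1adj(float emo, float epo, float (*rcp14)(float)) {
  float r = rcp14(epo);             // initial reciprocal, ~2^-14 relative error
  const float e = 1.0f - r * epo;   // residual error of the reciprocal
  r = r + e * r;                    // one Newton-Raphson step: error ~squared
  float y = emo * r;                // quotient estimate
  const float ey = emo - y * epo;   // residual of the division
  return y + ey * r;                // final adjustment ("adj")
}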
| 14,832 | 46.694534 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
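The "lut4" in these kernel names refers to how the scale s = 2^(n/4) is rebuilt from integer bits: the two fractional bits of n select one of four table entries (presumably the binary32 encodings of 2^(k/4) for k = 0..3, adjusted for the magic bias), while the integer bits are shifted straight into the exponent field. A scalar sketch of the reassembly, with the table contents left as an assumption:

#include <stdint.h>
#include <string.h>

static float scale_from_lut4(float vn_biased, const uint32_t table[4]) {
  uint32_t n_bits;
  memcpy(&n_bits, &vn_biased, sizeof(n_bits));  // bit pattern of biased n
  const uint32_t e = n_bits << 21;       // integer part -> exponent (23 - 2 = 21)
  const uint32_t l = table[n_bits & 3];  // low 2 bits -> 2^(k/4) table entry
  const uint32_t s_bits = l + e;         // mirrors vs = vl + ve above
  float s;
  memcpy(&s, &s_bits, sizeof(s));
  return s;                              // s = 2^(n/4), up to the bias offset
}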
| 16,221 | 47.423881 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x144.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512i vl8 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn8)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
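These kernels also round z*(-log2e) onto the LUT grid without a convert instruction: adding a large "magic bias" forces binary32 arithmetic to do the rounding, after which the quantized value sits in the low mantissa bits and the bias is subtracted back off. A sketch with the classic 1.5*2^23 integer-rounding constant; note that the kernels' actual magic_bias is a different, smaller value chosen so that two fractional bits of n survive for the 4-entry table index, so the constant below is an assumption for exposition only.

static float round_via_magic_bias(float v) {
  // Assumes round-to-nearest-even and |v| well below 2^21.
  const float bias = 0x1.8p+23f;  // 1.5 * 2^23, the classic rounding constant
  const float biased = v + bias;  // round(v) now occupies the low mantissa bits
  return biased - bias;           // recover the rounded value as a float
}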
| 17,610 | 48.05571 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
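Finally, every kernel handles the 1..15-element remainder without a scalar loop: a __mmask16 with one bit per leftover float drives a zero-filling masked load and a masked store. The pattern in isolation, as a hypothetical helper:

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static void copy_tail_avx512(const float* input, float* output, size_t n) {
  // n in [1, 15]: (1 << n) - 1 sets one mask bit per remaining element.
  const __mmask16 vmask =
      _cvtu32_mask16((uint32_t) ((UINT32_C(1) << n) - UINT32_C(1)));
  const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);  // inactive lanes read as 0
  _mm512_mask_storeu_ps(output, vmask, vx);               // only active lanes stored
}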
| 5,010 | 37.844961 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x160.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 21);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 21);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 21);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512i vl6 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn6)));
const __m512i vl7 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn7)));
const __m512i vl8 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn8)));
const __m512i vl9 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn9)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
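    // Reduced argument: vt = n*ln2 + vz, the remainder of the range reduction.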
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
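    // Degree-4 polynomial p(t) = ((c4*t + c3)*t + c2)*t - 2, evaluated via Horner.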
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
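    // Reconstruct expm1: vemo = p*(t*s) + (s - 1) ~ exp(-2*vz) - 1.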
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
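    // vepo = vemo + 2 ~ exp(-2*vz) + 1, the denominator of tanh.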
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
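    // Approximate reciprocal (VRCP14PS, relative error below 2**-14), refined by
    // one Newton-Raphson step: r <- r + r*(1 - r*epo).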
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
__m512 vrepo9 = _mm512_rcp14_ps(vepo9);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
const __m512 verepo9 = _mm512_fnmadd_ps(vrepo9, vepo9, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm512_fmadd_ps(verepo9, vrepo9, vrepo9);
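    // Divide: vy = vemo/vepo ~ -tanh(|x|), computed as emo times the refined reciprocal.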
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
__m512 vy9 = _mm512_mul_ps(vemo9, vrepo9);
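    // Residual adjustment (the "adj" in nr1adj): fold the remaining division error
    // ey = emo - y*epo back into y for an extra bit of accuracy.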
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
const __m512 vey9 = _mm512_fnmadd_ps(vy9, vepo9, vemo9);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy9 = _mm512_fmadd_ps(vey9, vrepo9, vy9);
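    // Restore the sign: ternary-logic 0xD8 takes the sign bit from vx and the
    // magnitude bits from vy, yielding tanh(x).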
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
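
A minimal scalar sketch of the same expm1minus-rr1-lut4-p4h3 scheme follows, for reference only. It mirrors the vector pipeline above (clamp, range reduction, 2**n reconstruction, degree-4 Horner polynomial, division, sign restore) but substitutes libm helpers for the LUT/exponent bit tricks; the saturation cutoff and the polynomial coefficients below are illustrative assumptions (plain Taylor values), not the exact minimax constants stored in xnn_f32_tanh_params, and tanh_expm1minus_ref is a hypothetical helper, not part of XNNPACK.

#include <math.h>
#include <stdio.h>

// Reference tanh via tanh(x) = (exp(-2|x|) - 1) / (exp(-2|x|) + 1), sign-fixed.
static float tanh_expm1minus_ref(float x) {
  // Clamp |x| to an assumed saturation threshold; tanh rounds to +/-1 beyond it.
  const float z = fminf(fabsf(x), 0x1.205968p+3f);
  // Range reduction: -2*z = n*ln(2) + r with n a multiple of 1/4, so a 4-entry
  // table can supply 2**frac(n) in the kernel; exp2f stands in for that here.
  const float n = rintf(z * -0x1.715476p+1f * 4.0f) * 0.25f;  // -2/ln(2)
  const float t = n * 0x1.62E430p-2f + z;                     // ln(2)/2; r = -2*t
  const float s = exp2f(n);                                   // s = 2**n
  // exp(-2*t) - 1 ~ t * (((c4*t + c3)*t + c2)*t - 2), Taylor coefficients.
  const float p = ((0x1.555556p-1f * t - 0x1.555556p+0f) * t + 2.0f) * t - 2.0f;
  const float emo = p * (t * s) + (s - 1.0f);  // exp(-2*z) - 1
  const float epo = emo + 2.0f;                // exp(-2*z) + 1
  const float y = emo / epo;                   // -tanh(z), in [-1, 0]
  return copysignf(y, x);                      // magnitude of y, sign of x
}

int main(void) {
  for (float x = -3.0f; x <= 3.0f; x += 1.5f) {
    printf("x=%+.1f ref=%+.6f libm=%+.6f\n", x, tanh_expm1minus_ref(x), tanhf(x));
  }
  return 0;
}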
| 18,999
| 48.608355
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
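  // The table is the 4-entry LUT for 2**frac(n), replicated across all four
  // 128-bit lanes so that VPERMILPS can index it with the low 2 bits of vn.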
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
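    // Zero-fill the lanes beyond the batch with a masked load; the masked store
    // below writes back only the valid lanes.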
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,892
| 40.324607
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
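// XNN_OOB_READS below marks kernels that are permitted to read (never write)
// past the end of their buffers; it is used to suppress sanitizer reports.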
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,279
| 42.162791
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
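  // Main loop: four 16-float vectors (64 elements) per iteration; remainders
  // fall through to the 16-wide loop and then to the masked tail.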
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,666
| 43.631799
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
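  // All scalar constants above are broadcast from the shared params struct;
  // only the LUT is loaded as a full 512-bit vector.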
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 12,053
| 44.8327
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut4-p4h3ts-perm-nr1adj-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut4_p4h3ts_perm_nr1adj_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.magic_bias);
const __m512 vtable = _mm512_load_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut4_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut4_p4h3_perm.sign_mask);
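  // The generator interleaves six independent 16-float streams per iteration
  // to expose instruction-level parallelism.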
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 21);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 21);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 21);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 21);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 21);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 21);
const __m512i vl0 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn0)));
const __m512i vl1 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn1)));
const __m512i vl2 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn2)));
const __m512i vl3 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn3)));
const __m512i vl4 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn4)));
const __m512i vl5 = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn5)));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
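    // Division-free path: start from the RCP14 estimate of 1/vepo (about 2**-14
    // relative error) and refine it with one Newton-Raphson step:
    //   r <- r + r * (1 - r * vepo).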
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
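    // One residual correction of the quotient: vey = vemo - vy * vepo is the
    // remainder, and vy += vey * (1/vepo) cancels most of its rounding error.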
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
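    // Ternary-logic imm8 0xD8 is a bit-select: take bits of vx where vsign_mask
    // is set (the sign bit) and bits of vy elsewhere, i.e. vy = copysign(vy, vx).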
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
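    // E.g. batch == 3 remaining floats yields vmask == 0b0000000000000111, so
    // only the first 3 lanes are loaded and stored.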
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 21);
const __m512i vl = _mm512_castps_si512(_mm512_permutevar_ps(vtable, _mm512_castps_si512(vn)));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 13,440 | 45.832753 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
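// A note on the layout (inferred from how the table is used below): each entry
// stores the float bits of the corresponding exp2 value minus (k << 20), so
// adding the shifted fixed-point bits of n -- which carry k in bits 20..22 --
// reconstructs the correctly scaled power of two with a single integer add.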
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
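    // VRANGEPS with imm8 0xA returns min(|x|, |sat_cutoff|) with the sign bit
    // cleared, folding the input to non-negative z and saturating large |x|
    // in a single instruction.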
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
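    // The magic bias leaves n in fixed point with 3 fractional bits: shifting
    // its bits left by 20 moves the integer part of n into the exponent field
    // (ve), while the 3 low fractional bits index the exp2 table (vidx).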
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
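    // Reduced argument: t = n * ln2 + z, the remainder of the range reduction.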
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
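    // Horner evaluation of the polynomial factor p(t) = ((c4*t + c3)*t + c2)*t - 2.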
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
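    // vemo approximates expm1(-2*|x|) and vepo = vemo + 2, so vemo / vepo is
    // tanh up to sign; the sign of x is patched back in below.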
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 13,279 | 47.290909 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 14,480 | 48.088136 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x144.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vidx8 = _mm512_and_si512(_mm512_castps_si512(vn8), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl8 = _mm512_i32gather_epi32(vidx8, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 15,681 | 48.784127 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 4,774 | 38.139344 | 124 | c |
XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x160.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vidx8 = _mm512_and_si512(_mm512_castps_si512(vn8), vindex_mask);
const __m512i vidx9 = _mm512_and_si512(_mm512_castps_si512(vn9), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl8 = _mm512_i32gather_epi32(vidx8, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl9 = _mm512_i32gather_epi32(vidx9, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
__m512 vy9 = _mm512_div_ps(vemo9, vepo9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
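// A note on the 0xA immediate below (an assumption based on Intel's documented
// VRANGEPS encoding): imm[1:0] = 2 selects the minimum of the absolute values and
// imm[3:2] = 2 clears the sign bit, so vz = min(|x|, sat_cutoff), i.e. |x| clamped
// at the saturation threshold.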
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
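// Adding the magic bias fixes vn's exponent so that vz * minus_log2e, rounded to a
// multiple of 1/8, sits in the low mantissa bits as a fixed-point value. The next
// four statements split it: the bits above the 3-bit LUT index are shifted into the
// exponent field (23 - 3 = 20), and a single integer add of the table entry rebuilds
// the scale factor vs without an int-to-float conversion.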
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
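// vt = vn * ln2 + vz is the small residual left over from the range reduction; the
// Horner chain below evaluates p(vt) = c4*vt^3 + c3*vt^2 + c2*vt - 2 on that residual.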
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
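// vemo holds the expm1 result ("e minus one") and vepo = vemo - (-2) = vemo + 2 is
// the matching "e plus one", so the quotient below is the (exp - 1)/(exp + 1) form
// of tanh, still carrying the sign of the reduced argument.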
__m512 vy = _mm512_div_ps(vemo, vepo);
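// 0xD8 is the bitwise-select truth table: each result bit is taken from vx where
// vsign_mask is set (the sign bit only) and from vy elsewhere, restoring sign(x).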
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
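// E.g. batch == 5 yields vmask == 0b0000000000011111, enabling the 5 low lanes.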
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 16,882
| 49.397015
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(-k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
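// A minimal sketch of how these 8 entries could be generated (an illustration, not
// part of the generated kernel; fp32_to_bits is assumed here to bit-cast a float to
// its IEEE-754 binary32 representation):
//
//   for (uint32_t k = 0; k < 8; k++) {
//     xnn_table_exp2minus_k_over_8[k] = fp32_to_bits(exp2f(-(float) k / 8.0f)) - (k << 20);
//   }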
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
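// The float reconstruction of vs0/vs1 below is interleaved with the magic-bias
// removal from vn0/vn1, presumably to give the out-of-order core independent work
// between the dependent integer and floating-point steps.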
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,279
| 40.6
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(-k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
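// All parameters are broadcast once, outside the loops: the three loop tiers below
// then process 48-element, 16-element, and 1..15-element chunks in turn.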
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 8,478
| 42.482051
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(-k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
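// The _x64 suffix names the primary-loop tile: 64 floats, i.e. four 16-lane __m512
// vectors, are loaded, transformed, and stored per iteration.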
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,677
| 44.013953
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(-k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,876
| 45.285106
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-div-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(-k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 12,075
| 46.356863
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x112.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
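// How the table is used below (a sketch of the magic-bias trick): after
// vn = z*minus_log2e + magic_bias, the low bits of vn's bit pattern hold
// the rounded binary exponent n in fixed point with 3 fractional bits.
// The low 3 bits (vidx) select 2**(k/8) from this table, and the
// remaining bits, shifted left by 20 (= 23 mantissa bits - 3 index bits),
// restore the integer part of the exponent; since every entry is already
// decremented by (k << 20), the integer sum vl + ve is exactly the fp32
// bit pattern of the scale vs = 2**n.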
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
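// _mm512_range_ps with imm8 0xA returns the operand of smaller magnitude
// with its sign bit cleared, so vz = min(|x|, |sat_cutoff|): |x| saturated
// at the point where tanh rounds to +-1. The sign of x is restored by the
// VPTERNLOG at the bottom of the loop.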
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
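// epo = emo - (-2) = expm1(-2z) + 2 (the factor of 2 is folded into the
// constants, assuming the usual expm1minus formulation), so emo/epo equals
// -tanh(z) up to rounding; the final sign fix-up turns this into tanh(x).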
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
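// _mm512_rcp14_ps yields a ~14-bit-accurate reciprocal of epo; the single
// Newton-Raphson step r <- r + r*(1 - r*epo) below (verepo holds the
// residual 1 - r*epo) refines it to nearly full single precision -- the
// "nr1" in the kernel name.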
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
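// Residual correction (the "adj" in the kernel name): vey = emo - y*epo is
// the error of the quotient, and y <- y + ey*r folds it back so emo/epo is
// computed to near-correctly-rounded accuracy without a hardware divide.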
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
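// VPTERNLOG with imm8 0xD8 is a bitwise select: where sign_mask is set the
// bit is taken from vx, elsewhere from vy -- i.e. the sign of x is copied
// onto the magnitude computed for |x|.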
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
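// For example, 3 remaining elements yield vmask = 0b0000000000000111,
// enabling only the low 3 lanes of the masked load and store.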
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 15,708 | 47.634675 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 17,182 | 48.376437 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x144.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vidx8 = _mm512_and_si512(_mm512_castps_si512(vn8), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl8 = _mm512_i32gather_epi32(vidx8, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 18,656 | 49.018767 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 5,291 | 38.492537 | 124 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x160.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vidx6 = _mm512_and_si512(_mm512_castps_si512(vn6), vindex_mask);
const __m512i vidx7 = _mm512_and_si512(_mm512_castps_si512(vn7), vindex_mask);
const __m512i vidx8 = _mm512_and_si512(_mm512_castps_si512(vn8), vindex_mask);
const __m512i vidx9 = _mm512_and_si512(_mm512_castps_si512(vn9), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl6 = _mm512_i32gather_epi32(vidx6, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl7 = _mm512_i32gather_epi32(vidx7, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl8 = _mm512_i32gather_epi32(vidx8, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl9 = _mm512_i32gather_epi32(vidx9, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
__m512 vrepo9 = _mm512_rcp14_ps(vepo9);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
const __m512 verepo9 = _mm512_fnmadd_ps(vrepo9, vepo9, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm512_fmadd_ps(verepo9, vrepo9, vrepo9);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
__m512 vy9 = _mm512_mul_ps(vemo9, vrepo9);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
const __m512 vey9 = _mm512_fnmadd_ps(vy9, vepo9, vemo9);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy9 = _mm512_fmadd_ps(vey9, vrepo9, vy9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
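    // For example, batch == 3 leaves vmask == 0b0000000000000111, so only the
    // three leftover floats are loaded, computed, and stored.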
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 20,130
| 49.580402
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
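    // Saturate: VRANGEPS with imm8 0xA picks the input of smaller magnitude,
    // clamping x at sat_cutoff, beyond which tanh is already +-1 in float
    // precision.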
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
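    // Magic-bias trick: adding the large vmagic_bias rounds n = z * minus_log2e
    // into the low mantissa bits as a fixed-point value in units of 1/8.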
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
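    // Split the fixed-point n: the bits above the low 3 become the exponent
    // adjustment (shift by 20 == 23 - 3), while the low 3 bits (vindex_mask)
    // index the 8-entry exp2(k / 8) table.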
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
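    // Reconstruct s = 2**n exactly: each table entry was pre-decremented by
    // (k << 20), so the integer add of ve restores mantissa and exponent at
    // once; subtracting the magic bias then recovers n as a float.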
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
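    // rr1: one-step range reduction. t = n * ln2 + z is what remains of the
    // (scaled) argument after removing n; vln2 is presumably scaled to match
    // vminus_log2e in the template's parameter initialization.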
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
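    // p4: degree-4 expm1 polynomial evaluated by Horner's rule; the leading
    // factor of t is applied later via the t*s product, and the final
    // coefficient is the constant -2.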
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
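    // expm1minus, "ts" order: emo = p * (t * s) + (s - 1), i.e. the scaled
    // polynomial plus (s - 1), which together approximate expm1 of the full
    // negative doubled argument (~ expm1(-2|x|) after clamping).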
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
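    // tanh identity: tanh(a / 2) = expm1(a) / (expm1(a) + 2), so
    // epo = emo - (-2) is the denominator of the quotient computed below.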
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
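    // nr1: refine the 14-bit reciprocal estimate with one Newton-Raphson
    // iteration: r <- r + r * (1 - r * epo).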
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
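    // adj: y = emo * r, then add the correction ey * r, where
    // ey = emo - y * epo is the exact division residual; this recovers the
    // final ulps lost to the approximate reciprocal.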
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
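    // Copy the sign of x onto y: VPTERNLOGD with imm8 0xD8 is a bitwise
    // select - the sign bit is taken from vx, all other bits from vy
    // (tanh is odd).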
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
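  // Process any remaining full 16-float vectors with the same pipeline, one
  // vector at a time.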
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 8,343
| 41.141414
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,815
| 43.017937
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 11,287
| 44.516129
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 12,759
| 45.739927
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-gather-nr1adj-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_gather_nr1adj_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.magic_bias);
const __m512i vindex_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.index_mask);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
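    // VRANGEPS with imm8 = 0xA selects min(|a|, |b|) and clears the sign bit
    // (per my reading of the imm8 encoding), so vz = min(|x|, |sat_cutoff|) >= 0:
    // inputs beyond the cutoff saturate where tanh(x) rounds to +-1 in single
    // precision.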
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
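    // The magic bias makes the FMA itself perform round-to-nearest: after the
    // add, the low mantissa bits of vn hold the fixed-point integer part of
    // z*(-log2e), with bits [2:0] serving as the LUT index and the bits above
    // feeding the exponent reconstruction below.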
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i vidx0 = _mm512_and_si512(_mm512_castps_si512(vn0), vindex_mask);
const __m512i vidx1 = _mm512_and_si512(_mm512_castps_si512(vn1), vindex_mask);
const __m512i vidx2 = _mm512_and_si512(_mm512_castps_si512(vn2), vindex_mask);
const __m512i vidx3 = _mm512_and_si512(_mm512_castps_si512(vn3), vindex_mask);
const __m512i vidx4 = _mm512_and_si512(_mm512_castps_si512(vn4), vindex_mask);
const __m512i vidx5 = _mm512_and_si512(_mm512_castps_si512(vn5), vindex_mask);
const __m512i vl0 = _mm512_i32gather_epi32(vidx0, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl1 = _mm512_i32gather_epi32(vidx1, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl2 = _mm512_i32gather_epi32(vidx2, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl3 = _mm512_i32gather_epi32(vidx3, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl4 = _mm512_i32gather_epi32(vidx4, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512i vl5 = _mm512_i32gather_epi32(vidx5, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
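    // Degree-4 polynomial in Horner form; note p(0) == -2, so p(t)*t
    // approximates expm1 of the scaled reduced argument, and
    // vemo = p*t*s + (s - 1) below reconstructs expm1(-2z).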
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
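    // Division without VDIVPS: VRCP14PS gives a ~2**-14 accurate reciprocal of
    // vepo, one Newton-Raphson step (verepo = 1 - r*d; r += verepo*r) refines
    // it, and after the multiply the residual vey = vemo - vy*vepo adjusts the
    // quotient (the "nr1adj" part of the kernel name).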
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
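    // VPTERNLOG imm8 0xD8 is the bitwise select C ? B : A: the sign bit comes
    // from vx and all other bits from vy, i.e. vy = copysign(vy, x) == tanh(x).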
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vidx = _mm512_and_si512(_mm512_castps_si512(vn), vindex_mask);
const __m512i vl = _mm512_i32gather_epi32(vidx, xnn_table_exp2minus_k_over_8, sizeof(uint32_t));
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
file_length: 14,231 | avg_line_length: 46.758389 | max_line_length: 127 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x112.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
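    // The "perm" variant replaces the memory gather with VPERMD: the low bits
    // of vn index a 16-dword in-register copy of the exp2 table held in vtable.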
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
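    // The "div" variant computes the tanh ratio with a full-precision VDIVPS
    // instead of the reciprocal + Newton-Raphson sequence used by the nr1adj
    // kernels, trading some latency for simplicity.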
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
file_length: 12,254 | avg_line_length: 45.596958 | max_line_length: 127 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x128.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
file_length: 13,353 | avg_line_length: 46.35461 | max_line_length: 127 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x144.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512i vl8 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn8), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
file_length: 14,452 | avg_line_length: 47.016611 | max_line_length: 127 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
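  // Algorithm sketch for this kernel (my reading of the generated code):
  //   z   = min(|x|, |sat_cutoff|)                  (VRANGEPS, imm8 = 0xA)
  //   n   = z*minus_log2e + magic_bias              (round via magic bias)
  //   s   = 2**int-part(n), split between a 3-bit LUT and the exponent field
  //   t   = n*ln2 + z                               (reduced argument)
  //   em1 = p(t)*t*s + (s - 1) ~= expm1(-2*z)       (degree-4 polynomial)
  //   tanh(x) = copysign(em1 / (em1 + 2), x)        (sign merged via VPTERNLOG)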
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
file_length: 4,463 | avg_line_length: 37.153846 | max_line_length: 124 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x160.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512i vl8 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn8), vtable);
const __m512i vl9 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn9), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
__m512 vy9 = _mm512_div_ps(vemo9, vepo9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 15,551 | 47.6 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x32.c |
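The "lut8 ... perm" part of the kernel name refers to how 2^n is rebuilt after the magic-bias range reduction: the low 3 mantissa bits of vn index an 8-entry table of 2^(k/8) bit patterns (fetched with _mm512_permutexvar_epi32, which selects table lanes by the low bits of each index), while the bits above them, holding the integer part of n, are shifted left by 20 (= 23 - 3) into the float exponent field and added on. A hypothetical scalar model, assuming the parameter table holds the 2^(k/8) bit patterns:

#include <stdint.h>
#include <string.h>

// n_bits is the bit pattern of vn after the magic-bias addition: the low
// 3 bits hold the fractional part of n (table index), higher bits the
// integer part.
static float scalb_lut8(uint32_t n_bits, const uint32_t table[8]) {
  const uint32_t ve = n_bits << 20;       // integer part -> float exponent field
  const uint32_t vl = table[n_bits & 7];  // bit pattern of 2^((n_bits & 7) / 8)
  const uint32_t bits = vl + ve;          // combined bit pattern of 2^n
  float s;
  memcpy(&s, &bits, sizeof(s));           // reinterpret as float (vs in the kernel)
  return s;
}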
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 6,764 | 39.267857 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x48.c |
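The one non-obvious intrinsic in these kernels is _mm512_range_ps with immediate 0xA. Reading the imm8 encoding from the Intel intrinsics guide, bits [1:0] = 10 select the absolute-minimum operation and bits [3:2] = 10 force the result sign to zero, so vz = min(|sat_cutoff|, |x|): the magnitude of x clamped at the saturation threshold in a single instruction, replacing the separate sign-or/max pair used by the AVX2 kernels. A scalar equivalent under that reading:

#include <math.h>

// Scalar equivalent of _mm512_range_ps(vsat_cutoff, vx, 0xA):
// absolute minimum, with the sign bit of the result cleared.
static float saturate_abs(float sat_cutoff, float x) {
  return fminf(fabsf(sat_cutoff), fabsf(x));
}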
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,861 | 41.042781 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x64.c |
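The polynomial block in each kernel is a plain Horner evaluation of p(t) = ((c4*t + c3)*t + c2)*t - 2, after which expm1 of the reduced argument is rebuilt with a single fused multiply-add as p*t*s + (s - 1). A scalar restatement of that step, with the minimax coefficients left as parameters:

// Horner polynomial plus reconstruction, mirroring the vp/vts/vsmo/vemo
// sequence above; the result approximates expm1 of the reduced argument.
static float expm1_reconstruct(float t, float s, float c2, float c3, float c4) {
  float p = c4;
  p = p * t + c3;              // vp = fmadd(vp, vt, vc3)
  p = p * t + c2;              // vp = fmadd(vp, vt, vc2)
  p = p * t - 2.0f;            // vp = fmadd(vp, vt, vminus_two)
  const float ts = t * s;      // vts
  const float smo = s - 1.0f;  // vsmo = vs - vone
  return p * ts + smo;         // vemo = fmadd(vp, vts, vsmo)
}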
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 8,958 | 42.490291 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x80.c |
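The final _mm512_ternarylogic_epi32 with truth table 0xD8 computes c ? b : a bitwise; with c = vsign_mask it takes the sign bit from vx and every other bit from vy, i.e. a one-instruction copysign(y, x). A scalar model:

#include <stdint.h>
#include <string.h>

// Bitwise select matching the 0xD8 truth table: sign bit from x, all other
// bits from y. Equivalent to copysignf(y, x).
static float copy_sign_bits(float y, float x) {
  uint32_t ybits, xbits;
  memcpy(&ybits, &y, sizeof(ybits));
  memcpy(&xbits, &x, sizeof(xbits));
  const uint32_t sign = UINT32_C(0x80000000);
  const uint32_t out = (xbits & sign) | (ybits & ~sign);
  float r;
  memcpy(&r, &out, sizeof(r));
  return r;
}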
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,055 | 43.693333 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-div-x96.c |
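Every kernel ends with the same masked tail: the leftover batch (1 to 15 floats) becomes a 16-bit mask with that many low bits set, which drives one masked load and one masked store, so no scalar cleanup loop is needed. A standalone sketch of just the mask mechanics (compile for AVX-512; the surrounding tanh math is elided):

#include <assert.h>
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Build a mask with `count` low bits set (1 <= count <= 15) and use it for a
// masked vector copy; the kernels wrap the same mask around the tanh math.
static void masked_tail_copy(const float* input, float* output, size_t count) {
  assert(count >= 1 && count <= 15);
  const __mmask16 vmask =
      _cvtu32_mask16((uint16_t) ((UINT32_C(1) << count) - UINT32_C(1)));
  const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);  // inactive lanes zeroed
  _mm512_mask_storeu_ps(output, vmask, vx);               // only active lanes written
}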
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 11,152 | 44.709016 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x112.c |
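The next file's nr1adj suffix (versus div above) suggests, going by XNNPACK's kernel naming, that the final vemo/vepo division is replaced by a hardware reciprocal estimate refined with one Newton-Raphson step plus an adjustment; the excerpt below is truncated before that point, so only the shared range-reduction and polynomial code is visible. For reference, a generic sketch of one Newton-Raphson refinement of a reciprocal estimate:

// One Newton-Raphson step for r ~ 1/d: r' = r * (2 - d*r), which roughly
// doubles the number of correct bits in the estimate.
static float nr_recip_step(float d, float r) {
  return r * (2.0f - d * r);
}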
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 14,683 | 46.215434 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x128.c |
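The `vn`/`ve`/`vl`/`vs` sequence above packs two classic tricks. Adding `vmagic_bias` rounds `z * (-log2e)` to a fixed-point value with three fraction bits, so the low bits of `vn` hold an integer `m ~ round(8 * z * (-log2e))`; the three low bits of `m` select a `2^(k/8)` mantissa from the lookup table (via `_mm512_permutexvar_epi32`, with the 8 entries presumably repeated across the 16 permute lanes), and the remaining bits are shifted into the float exponent field (`<< 20` puts bit 3 of `m` at bit 23). A portable model of the value being reconstructed, with an assumed bias constant and without the bit-level merge:

#include <math.h>

// Model of the scale factor s = 2^(m/8) that vl + ve reconstructs.
// The bias constant is an assumption: any value in [2^20, 2^21) rounds
// an added float to a multiple of 1/8 (the ulp at that magnitude),
// provided the FPU is in round-to-nearest mode.
static float scale_lut8_model(float v) {
  const float bias = 0x1.800000p+20f;         // assumed magic bias
  const float n = (v + bias) - bias;          // v rounded to nearest 1/8
  const int m = (int) lrintf(n * 8.0f);       // the fixed-point integer m
  static const float two_pow_k8[8] = {        // table of 2^(k/8), k = 0..7
    0x1.000000p+0f, 0x1.172B84p+0f, 0x1.306FE0p+0f, 0x1.4BFDAEp+0f,
    0x1.6A09E6p+0f, 0x1.8ACE54p+0f, 0x1.AE89FAp+0f, 0x1.D5818Ep+0f,
  };
  const int k = m & 7;                        // low 3 bits: table index
  return ldexpf(two_pow_k8[k], (m - k) / 8);  // remaining bits: exponent
}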
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 16,055 | 46.928358 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x144.c |
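The `nr1adj` suffix names the division strategy visible above: `_mm512_rcp14_ps` gives a ~14-bit reciprocal estimate, one Newton-Raphson step roughly squares the error, and a final residual step adjusts the quotient itself rather than the reciprocal. Pulled out of the kernel into a helper (a sketch assuming AVX512F; the generated code keeps everything inlined and unrolled):

#include <immintrin.h>

// The nr1adj division y = emo / epo, exactly as sequenced in the kernels above.
static __m512 div_nr1adj(__m512 vemo, __m512 vepo) {
  const __m512 vone = _mm512_set1_ps(1.0f);
  __m512 vrepo = _mm512_rcp14_ps(vepo);                       // r ~ 1/epo, ~2^-14 error
  const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);  // e = 1 - r*epo
  vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);              // Newton-Raphson step
  __m512 vy = _mm512_mul_ps(vemo, vrepo);                     // quotient estimate
  const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);        // residual emo - y*epo
  return _mm512_fmadd_ps(vey, vrepo, vy);                     // adjust the quotient
}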
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512i vl8 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn8), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 17,427 | 47.545961 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x16.c |
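Every variant ends with the same masked remainder path: `batch` is reduced to an element count and turned into a `__mmask16` with the low `count` bits set, so one masked load/store pair handles 1..15 leftover floats without a scalar loop. The mask arithmetic as a scalar sketch, mirroring the `_cvtu32_mask16` line above:

#include <stdint.h>

// Build the remainder mask used with _mm512_maskz_loadu_ps /
// _mm512_mask_storeu_ps: low n bits set, for n in [1, 15].
static inline uint16_t tail_mask16(uint32_t n) {
  return (uint16_t) ((UINT32_C(1) << n) - UINT32_C(1));  // e.g. n = 3 -> 0x0007
}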
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 4,980 | 37.612403 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x160.c |
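The final `_mm512_ternarylogic_epi32(..., 0xD8)` in each kernel is a bitwise select: for immediate 0xD8 the result takes each bit from the second operand where the third (`vsign_mask`) is set, and from the first elsewhere, so it copies the sign bit of `vx` into `vy` in a single instruction. A scalar model of that blend:

#include <stdint.h>

// Scalar model of the 0xD8 ternary-logic blend: per bit, result = mask ? b : a.
// With sign_mask = 0x80000000 this copies x's sign bit into y.
static inline uint32_t copysign_bits(uint32_t y_bits, uint32_t x_bits,
                                     uint32_t sign_mask) {
  return (x_bits & sign_mask) | (y_bits & ~sign_mask);
}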
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i ve6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 20);
const __m512i ve7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 20);
const __m512i ve8 = _mm512_slli_epi32(_mm512_castps_si512(vn8), 20);
const __m512i ve9 = _mm512_slli_epi32(_mm512_castps_si512(vn9), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
const __m512i vl8 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn8), vtable);
const __m512i vl9 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn9), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ve6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ve7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_add_epi32(vl8, ve8));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_add_epi32(vl9, ve9));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
__m512 vp6 = vc4;
__m512 vp7 = vc4;
__m512 vp8 = vc4;
__m512 vp9 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
__m512 vrepo9 = _mm512_rcp14_ps(vepo9);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
const __m512 verepo9 = _mm512_fnmadd_ps(vrepo9, vepo9, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
vrepo9 = _mm512_fmadd_ps(verepo9, vrepo9, vrepo9);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
__m512 vy9 = _mm512_mul_ps(vemo9, vrepo9);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
const __m512 vey6 = _mm512_fnmadd_ps(vy6, vepo6, vemo6);
const __m512 vey7 = _mm512_fnmadd_ps(vy7, vepo7, vemo7);
const __m512 vey8 = _mm512_fnmadd_ps(vy8, vepo8, vemo8);
const __m512 vey9 = _mm512_fnmadd_ps(vy9, vepo9, vemo9);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy6 = _mm512_fmadd_ps(vey6, vrepo6, vy6);
vy7 = _mm512_fmadd_ps(vey7, vrepo7, vy7);
vy8 = _mm512_fmadd_ps(vey8, vrepo8, vy8);
vy9 = _mm512_fmadd_ps(vey9, vrepo9, vy9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 18,799 | 48.086162 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
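  // Algorithm sketch (shared by all the lut8-p4h3 "nr1adj" variants below):
  // with z = min(|x|, sat_cutoff), tanh(x) is reconstructed from expm1(-2z) as
  //   y = expm1(-2z) / (expm1(-2z) + 2) = -tanh(z),
  // and the sign of x is copied back onto y at the end. expm1 itself combines
  // an 8-entry mantissa lookup table with exponent-bit arithmetic, a degree-4
  // polynomial in the reduced argument t, and one Newton-Raphson step plus a
  // residual adjustment (the "nr1adj" suffix) for the reciprocal of the
  // denominator.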
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
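    // z := min(|x|, sat_cutoff) with the sign cleared: imm8 = 0xA requests the
    // absolute-value minimum and forces the sign bit to zero.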
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
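    // Magic-bias trick: after this fmadd, the low mantissa bits of vn hold the
    // scaled exponent n rounded to 3 fractional bits.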
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
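    // Assemble the scale s = 2^n: the integer bits of n move into the float
    // exponent field (shift by 23 - 3 = 20) while the low 3 bits select one of
    // the 8 mantissa values from the table.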
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
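    // Reduced argument t = n*ln2 + z; a single-constant ("rr1") range
    // reduction.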
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
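    // Degree-4 polynomial p(t) = c4*t^3 + c3*t^2 + c2*t - 2, by Horner's rule.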
__m512 vp0 = vc4;
__m512 vp1 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
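    // Reconstruct expm1 as (s - 1) + (t*s)*p(t).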
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
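    // Denominator expm1 + 2 (vminus_two presumably holds -2.0f), i.e. the
    // exp(...) + 1 term of the tanh ratio.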
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
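    // Approximate 1/vepo with RCP14, then refine with one Newton-Raphson
    // iteration: r += r*(1 - r*vepo).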
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
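    // Divide with a residual adjustment: y = emo*r, then y += (emo - y*epo)*r
    // to cancel the remaining reciprocal error.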
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
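    // Ternary-logic 0xD8 is a bitwise select: sign bit from x, magnitude bits
    // from y, restoring the sign of tanh(x).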
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,828 | 39.989529 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,198 | 41.786047 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,568 | 43.221757 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 11,938 | 44.395437 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-lut8-p4h3ts-perm-nr1adj-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_lut8_p4h3ts_perm_nr1adj_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.magic_bias);
const __m512i vtable = _mm512_load_si512(params->avx512_expm1minus_rr1_lut8_p4h3_perm.table);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.ln2);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_lut8_p4h3_perm.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_lut8_p4h3_perm.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512i ve0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 20);
const __m512i ve1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 20);
const __m512i ve2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 20);
const __m512i ve3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 20);
const __m512i ve4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 20);
const __m512i ve5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 20);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ve0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ve1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ve2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ve3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ve4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ve5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
__m512 vp0 = vc4;
__m512 vp1 = vc4;
__m512 vp2 = vc4;
__m512 vp3 = vc4;
__m512 vp4 = vc4;
__m512 vp5 = vc4;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
const __m512 vey0 = _mm512_fnmadd_ps(vy0, vepo0, vemo0);
const __m512 vey1 = _mm512_fnmadd_ps(vy1, vepo1, vemo1);
const __m512 vey2 = _mm512_fnmadd_ps(vy2, vepo2, vemo2);
const __m512 vey3 = _mm512_fnmadd_ps(vy3, vepo3, vemo3);
const __m512 vey4 = _mm512_fnmadd_ps(vy4, vepo4, vemo4);
const __m512 vey5 = _mm512_fnmadd_ps(vy5, vepo5, vemo5);
vy0 = _mm512_fmadd_ps(vey0, vrepo0, vy0);
vy1 = _mm512_fmadd_ps(vey1, vrepo1, vy1);
vy2 = _mm512_fmadd_ps(vey2, vrepo2, vy2);
vy3 = _mm512_fmadd_ps(vey3, vrepo3, vy3);
vy4 = _mm512_fmadd_ps(vey4, vrepo4, vy4);
vy5 = _mm512_fmadd_ps(vey5, vrepo5, vy5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512i ve = _mm512_slli_epi32(_mm512_castps_si512(vn), 20);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ve));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc4;
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
const __m512 vey = _mm512_fnmadd_ps(vy, vepo, vemo);
vy = _mm512_fmadd_ps(vey, vrepo, vy);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x112.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
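  // Algorithm sketch (expm1minus formulation of tanh):
  //   z   = min(|x|, sat_cutoff)                      -- tanh saturates to +/-1 beyond the cutoff
  //   s   = 2^n, n from the magic-bias round of z * minus_log2e
  //   t   = n * ln2 + z                               -- reduced argument
  //   emo = p(t) * (t * s) + (s - 1)  ~=  expm1(-2z)  -- p is a degree-6 polynomial
  //   epo = emo + 2  =  exp(-2z) + 1
  //   tanh(x) = copysign(emo / epo, x), applied bitwise with VPTERNLOG below.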
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
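    // With the magic bias still in place, the integer n sits in the low mantissa bits;
    // shifting left by 23 moves it into the exponent field, yielding s = 2^n.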
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
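    // Horner evaluation of p(t) = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t - 2, one FMA per step.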
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
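    // Numerator emo = p(t)*(t*s) + (s - 1) ~= expm1(-2z); denominator epo = emo + 2.
    // The "div" variant computes the ratio with a full-precision VDIVPS.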
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x128.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x144.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn8), 23));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
__m512 vp8 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
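// Illustrative caller (not part of the generated kernel) -- a minimal sketch of how a
// microkernel with this signature is driven. `batch` is a byte count and must be a
// non-zero multiple of sizeof(float). The initializer name below is an assumption for
// illustration; production code fetches the matching init function from XNNPACK's
// configuration tables rather than hard-coding it.
//
//   union xnn_f32_tanh_params params;
//   xnn_init_f32_tanh_avx512_expm1minus_rr1_p6h5_params(&params);  // hypothetical name
//   float in[37], out[37];  // 37 floats: exercises both the full-vector and masked tails
//   xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x16(
//       37 * sizeof(float), in, out, &params);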
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x160.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x160(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
const __m512 vz9 = _mm512_range_ps(vsat_cutoff, vx9, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
__m512 vn9 = _mm512_fmadd_ps(vz9, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn8), 23));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vs9 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn9), 23));
vn9 = _mm512_sub_ps(vn9, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vln2, vz9);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
__m512 vp8 = vc6;
__m512 vp9 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc5);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
vp9 = _mm512_fmadd_ps(vp9, vt9, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vts9 = _mm512_mul_ps(vt9, vs9);
const __m512 vsmo9 = _mm512_sub_ps(vs9, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vemo9 = _mm512_fmadd_ps(vp9, vts9, vsmo9);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
const __m512 vepo9 = _mm512_sub_ps(vemo9, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
__m512 vy6 = _mm512_div_ps(vemo6, vepo6);
__m512 vy7 = _mm512_div_ps(vemo7, vepo7);
__m512 vy8 = _mm512_div_ps(vemo8, vepo8);
__m512 vy9 = _mm512_div_ps(vemo9, vepo9);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
vy9 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy9), _mm512_castps_si512(vx9), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
_mm512_storeu_ps(output + 144, vy9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
// File: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x32.c
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
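// Evaluation outline (per 16-float vector) -- a sketch; the exact scaling is folded into params:
//   vz   := min(|x|, |vsat_cutoff|) with the sign cleared (VRANGEPS, imm8 = 0xA), saturating large inputs;
//   vn   := vz * vminus_log2e, rounded to an integer via the magic-bias trick;
//   vs   := power of two rebuilt by shifting the biased vn into the float exponent field;
//   vt   := vn * vln2 + vz, the residual of the range reduction;
//   vp   := degree-6 polynomial in vt (Horner), ending in the -2 term;
//   vemo := vp * (vt * vs) + (vs - 1) ~= expm1(-2|x|);
//   vy   := vemo / (vemo + 2) = tanh(-|x|), with the sign of x merged back via VPTERNLOGD.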
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
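// Shift the magic-biased vn left by 23 bits: the rounded integer lands in the exponent
// field, reconstructing the power-of-two scale vs without a float-to-int round trip.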
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 6,494 | 38.846626 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
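// Horner evaluation of the degree-6 polynomial; the three vectors are interleaved so the
// dependent FMA chains overlap and keep the FMA pipes busy.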
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,539 | 40.428571 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
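// vepo := vemo + 2 (computed as vemo - vminus_two): the denominator of
// tanh = expm1 / (expm1 + 2).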
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 8,584 | 41.711443 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
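// Reconstruct the expm1-like numerator: vemo := vp * (vt * vs) + (vs - 1).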
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,629 | 42.772727 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-div-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
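// VRANGEPS with imm8 = 0xA yields min(|x|, |vsat_cutoff|) with the sign bit cleared, so
// inputs beyond the cutoff saturate and the rest of the pipeline sees a bounded z >= 0.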
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
__m512 vy0 = _mm512_div_ps(vemo0, vepo0);
__m512 vy1 = _mm512_div_ps(vemo1, vepo1);
__m512 vy2 = _mm512_div_ps(vemo2, vepo2);
__m512 vy3 = _mm512_div_ps(vemo3, vepo3);
__m512 vy4 = _mm512_div_ps(vemo4, vepo4);
__m512 vy5 = _mm512_div_ps(vemo5, vepo5);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vy = _mm512_div_ps(vemo, vepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,674 | 43.665272 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
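// The _nr1 variant avoids the full-latency division: start from the ~14-bit-accurate
// reciprocal RCP14 and refine it with one Newton-Raphson step, r' = r + r * (1 - r * vepo).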
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 13,203 | 44.847222 | 127 | c | XNNPACK | XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
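// Same reciprocal-plus-one-Newton-Raphson scheme as the other _nr1 kernels, unrolled
// here to eight vectors (128 floats) per main-loop iteration.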
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
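    // Reconstruct expm1: vemo = p*(t*s) + (s - 1), where s = 2^n restores the integer part of the exponent.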
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
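    // Denominator of tanh: vepo = vemo + 2, written as a subtraction of vminus_two to reuse the loaded constant.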
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
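    // _mm512_rcp14_ps gives a ~14-bit reciprocal estimate; the Newton-Raphson step below refines it to nearly full float precision.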
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
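    // tanh magnitude: vy = expm1 / (expm1 + 2), computed via the refined reciprocal instead of a division.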
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
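    // Restore the sign of the input: VPTERNLOG with imm8=0xD8 selects the sign bit from vx and the remaining bits from vy.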
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
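    // Single-vector loop: same saturate/expm1/reciprocal scheme as the main loop, 16 elements at a time.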
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 14,416 | avg_line_length: 45.506452 | max_line_length: 127 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f32-vtanh/gen/f32-vtanh-avx512skx-expm1minus-rr1-p6h5ts-nr1-x144.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vtanh/avx512skx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f32_vtanh_ukernel__avx512skx_expm1minus_rr1_p6h5ts_nr1_x144(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.sat_cutoff);
const __m512 vminus_log2e = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_log2e);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.magic_bias);
const __m512 vln2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.c2);
const __m512 vminus_two = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.minus_two);
const __m512 vone = _mm512_set1_ps(params->avx512_expm1minus_rr1_p6h5.one);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_expm1minus_rr1_p6h5.sign_mask);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
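    // Same expm1minus + Newton-Raphson reciprocal scheme as the kernel above, unrolled to nine 16-lane vectors (144 elements) per iteration.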
const __m512 vz0 = _mm512_range_ps(vsat_cutoff, vx0, 0xA);
const __m512 vz1 = _mm512_range_ps(vsat_cutoff, vx1, 0xA);
const __m512 vz2 = _mm512_range_ps(vsat_cutoff, vx2, 0xA);
const __m512 vz3 = _mm512_range_ps(vsat_cutoff, vx3, 0xA);
const __m512 vz4 = _mm512_range_ps(vsat_cutoff, vx4, 0xA);
const __m512 vz5 = _mm512_range_ps(vsat_cutoff, vx5, 0xA);
const __m512 vz6 = _mm512_range_ps(vsat_cutoff, vx6, 0xA);
const __m512 vz7 = _mm512_range_ps(vsat_cutoff, vx7, 0xA);
const __m512 vz8 = _mm512_range_ps(vsat_cutoff, vx8, 0xA);
__m512 vn0 = _mm512_fmadd_ps(vz0, vminus_log2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vminus_log2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vminus_log2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vminus_log2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vminus_log2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vminus_log2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vminus_log2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vminus_log2e, vmagic_bias);
__m512 vn8 = _mm512_fmadd_ps(vz8, vminus_log2e, vmagic_bias);
const __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
const __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
const __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
const __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
const __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
const __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
const __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
const __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
const __m512 vs8 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn8), 23));
vn8 = _mm512_sub_ps(vn8, vmagic_bias);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vln2, vz0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vln2, vz1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vln2, vz2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vln2, vz3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vln2, vz4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vln2, vz5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vln2, vz6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vln2, vz7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vln2, vz8);
__m512 vp0 = vc6;
__m512 vp1 = vc6;
__m512 vp2 = vc6;
__m512 vp3 = vc6;
__m512 vp4 = vc6;
__m512 vp5 = vc6;
__m512 vp6 = vc6;
__m512 vp7 = vc6;
__m512 vp8 = vc6;
vp0 = _mm512_fmadd_ps(vp0, vt0, vc5);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc5);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc5);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc5);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc5);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc5);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc5);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vminus_two);
vp1 = _mm512_fmadd_ps(vp1, vt1, vminus_two);
vp2 = _mm512_fmadd_ps(vp2, vt2, vminus_two);
vp3 = _mm512_fmadd_ps(vp3, vt3, vminus_two);
vp4 = _mm512_fmadd_ps(vp4, vt4, vminus_two);
vp5 = _mm512_fmadd_ps(vp5, vt5, vminus_two);
vp6 = _mm512_fmadd_ps(vp6, vt6, vminus_two);
vp7 = _mm512_fmadd_ps(vp7, vt7, vminus_two);
vp8 = _mm512_fmadd_ps(vp8, vt8, vminus_two);
const __m512 vts0 = _mm512_mul_ps(vt0, vs0);
const __m512 vsmo0 = _mm512_sub_ps(vs0, vone);
const __m512 vts1 = _mm512_mul_ps(vt1, vs1);
const __m512 vsmo1 = _mm512_sub_ps(vs1, vone);
const __m512 vts2 = _mm512_mul_ps(vt2, vs2);
const __m512 vsmo2 = _mm512_sub_ps(vs2, vone);
const __m512 vts3 = _mm512_mul_ps(vt3, vs3);
const __m512 vsmo3 = _mm512_sub_ps(vs3, vone);
const __m512 vts4 = _mm512_mul_ps(vt4, vs4);
const __m512 vsmo4 = _mm512_sub_ps(vs4, vone);
const __m512 vts5 = _mm512_mul_ps(vt5, vs5);
const __m512 vsmo5 = _mm512_sub_ps(vs5, vone);
const __m512 vts6 = _mm512_mul_ps(vt6, vs6);
const __m512 vsmo6 = _mm512_sub_ps(vs6, vone);
const __m512 vts7 = _mm512_mul_ps(vt7, vs7);
const __m512 vsmo7 = _mm512_sub_ps(vs7, vone);
const __m512 vts8 = _mm512_mul_ps(vt8, vs8);
const __m512 vsmo8 = _mm512_sub_ps(vs8, vone);
const __m512 vemo0 = _mm512_fmadd_ps(vp0, vts0, vsmo0);
const __m512 vemo1 = _mm512_fmadd_ps(vp1, vts1, vsmo1);
const __m512 vemo2 = _mm512_fmadd_ps(vp2, vts2, vsmo2);
const __m512 vemo3 = _mm512_fmadd_ps(vp3, vts3, vsmo3);
const __m512 vemo4 = _mm512_fmadd_ps(vp4, vts4, vsmo4);
const __m512 vemo5 = _mm512_fmadd_ps(vp5, vts5, vsmo5);
const __m512 vemo6 = _mm512_fmadd_ps(vp6, vts6, vsmo6);
const __m512 vemo7 = _mm512_fmadd_ps(vp7, vts7, vsmo7);
const __m512 vemo8 = _mm512_fmadd_ps(vp8, vts8, vsmo8);
const __m512 vepo0 = _mm512_sub_ps(vemo0, vminus_two);
const __m512 vepo1 = _mm512_sub_ps(vemo1, vminus_two);
const __m512 vepo2 = _mm512_sub_ps(vemo2, vminus_two);
const __m512 vepo3 = _mm512_sub_ps(vemo3, vminus_two);
const __m512 vepo4 = _mm512_sub_ps(vemo4, vminus_two);
const __m512 vepo5 = _mm512_sub_ps(vemo5, vminus_two);
const __m512 vepo6 = _mm512_sub_ps(vemo6, vminus_two);
const __m512 vepo7 = _mm512_sub_ps(vemo7, vminus_two);
const __m512 vepo8 = _mm512_sub_ps(vemo8, vminus_two);
__m512 vrepo0 = _mm512_rcp14_ps(vepo0);
__m512 vrepo1 = _mm512_rcp14_ps(vepo1);
__m512 vrepo2 = _mm512_rcp14_ps(vepo2);
__m512 vrepo3 = _mm512_rcp14_ps(vepo3);
__m512 vrepo4 = _mm512_rcp14_ps(vepo4);
__m512 vrepo5 = _mm512_rcp14_ps(vepo5);
__m512 vrepo6 = _mm512_rcp14_ps(vepo6);
__m512 vrepo7 = _mm512_rcp14_ps(vepo7);
__m512 vrepo8 = _mm512_rcp14_ps(vepo8);
const __m512 verepo0 = _mm512_fnmadd_ps(vrepo0, vepo0, vone);
const __m512 verepo1 = _mm512_fnmadd_ps(vrepo1, vepo1, vone);
const __m512 verepo2 = _mm512_fnmadd_ps(vrepo2, vepo2, vone);
const __m512 verepo3 = _mm512_fnmadd_ps(vrepo3, vepo3, vone);
const __m512 verepo4 = _mm512_fnmadd_ps(vrepo4, vepo4, vone);
const __m512 verepo5 = _mm512_fnmadd_ps(vrepo5, vepo5, vone);
const __m512 verepo6 = _mm512_fnmadd_ps(vrepo6, vepo6, vone);
const __m512 verepo7 = _mm512_fnmadd_ps(vrepo7, vepo7, vone);
const __m512 verepo8 = _mm512_fnmadd_ps(vrepo8, vepo8, vone);
vrepo0 = _mm512_fmadd_ps(verepo0, vrepo0, vrepo0);
vrepo1 = _mm512_fmadd_ps(verepo1, vrepo1, vrepo1);
vrepo2 = _mm512_fmadd_ps(verepo2, vrepo2, vrepo2);
vrepo3 = _mm512_fmadd_ps(verepo3, vrepo3, vrepo3);
vrepo4 = _mm512_fmadd_ps(verepo4, vrepo4, vrepo4);
vrepo5 = _mm512_fmadd_ps(verepo5, vrepo5, vrepo5);
vrepo6 = _mm512_fmadd_ps(verepo6, vrepo6, vrepo6);
vrepo7 = _mm512_fmadd_ps(verepo7, vrepo7, vrepo7);
vrepo8 = _mm512_fmadd_ps(verepo8, vrepo8, vrepo8);
__m512 vy0 = _mm512_mul_ps(vemo0, vrepo0);
__m512 vy1 = _mm512_mul_ps(vemo1, vrepo1);
__m512 vy2 = _mm512_mul_ps(vemo2, vrepo2);
__m512 vy3 = _mm512_mul_ps(vemo3, vrepo3);
__m512 vy4 = _mm512_mul_ps(vemo4, vrepo4);
__m512 vy5 = _mm512_mul_ps(vemo5, vrepo5);
__m512 vy6 = _mm512_mul_ps(vemo6, vrepo6);
__m512 vy7 = _mm512_mul_ps(vemo7, vrepo7);
__m512 vy8 = _mm512_mul_ps(vemo8, vrepo8);
vy0 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy0), _mm512_castps_si512(vx0), vsign_mask, 0xD8));
vy1 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy1), _mm512_castps_si512(vx1), vsign_mask, 0xD8));
vy2 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy2), _mm512_castps_si512(vx2), vsign_mask, 0xD8));
vy3 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy3), _mm512_castps_si512(vx3), vsign_mask, 0xD8));
vy4 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy4), _mm512_castps_si512(vx4), vsign_mask, 0xD8));
vy5 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy5), _mm512_castps_si512(vx5), vsign_mask, 0xD8));
vy6 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy6), _mm512_castps_si512(vx6), vsign_mask, 0xD8));
vy7 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy7), _mm512_castps_si512(vx7), vsign_mask, 0xD8));
vy8 = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy8), _mm512_castps_si512(vx8), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
_mm512_storeu_ps(output + 128, vy8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_range_ps(vsat_cutoff, vx, 0xA);
__m512 vn = _mm512_fmadd_ps(vz, vminus_log2e, vmagic_bias);
const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
const __m512 vt = _mm512_fmadd_ps(vn, vln2, vz);
__m512 vp = vc6;
vp = _mm512_fmadd_ps(vp, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vminus_two);
const __m512 vts = _mm512_mul_ps(vt, vs);
const __m512 vsmo = _mm512_sub_ps(vs, vone);
const __m512 vemo = _mm512_fmadd_ps(vp, vts, vsmo);
const __m512 vepo = _mm512_sub_ps(vemo, vminus_two);
__m512 vrepo = _mm512_rcp14_ps(vepo);
const __m512 verepo = _mm512_fnmadd_ps(vrepo, vepo, vone);
vrepo = _mm512_fmadd_ps(verepo, vrepo, vrepo);
__m512 vy = _mm512_mul_ps(vemo, vrepo);
vy = _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(vy), _mm512_castps_si512(vx), vsign_mask, 0xD8));
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| file_length: 15,629 | avg_line_length: 46.078313 | max_line_length: 127 | extension_type: c |