repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-neonfma-expm1minus-rr1-p6h5ts-nr1recps1fmaadj.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise over a buffer of floats using NEON+FMA intrinsics.
// n is the buffer size in bytes and must be a multiple of sizeof(float32x4_t).
// Variant: rr1 range reduction, degree-6 polynomial, reciprocal via Newton-Raphson
// (one RECPS step + one FMA step), with a final residual adjustment ("adj").
void xnn_math_f32_tanh__neonfma_expm1minus_rr1_p6h5ts_nr1recps1fmaadj(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float32x4_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float32x4_t vsat_cutoff = vmovq_n_f32(0x1.205968p+3f);
const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp+22f);
const float32x4_t vln2 = vmovq_n_f32(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/4, log(2)/4]
const float32x4_t vc6 = vmovq_n_f32(0x1.6B7338p-4f);
const float32x4_t vc5 = vmovq_n_f32(-0x1.12278Ep-2f);
const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-1f);
const float32x4_t vc3 = vmovq_n_f32(-0x1.5554B0p+0f);
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp+0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const float32x4_t vone = vmovq_n_f32(1.0f);
// Mask for the sign bit.
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; n != 0; n -= sizeof(float32x4_t)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float32x4_t vz = vabsq_f32(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = vminq_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 1).
// We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
// Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
// 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
// Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
// Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t * (-p)
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
// = s * t * (-p) + (s - 1)
// = (s - 1) - (t * s) * p
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
// Use Newton-Raphson method (2 iterations: one RECPS step followed by one FMA step) to compute the reciprocal
// of the denominator.
// Note: 1 < exp(-2z) + 1 <= 2, because z >= 0 and 0 < exp(-2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
float32x4_t vrepo = vrecpeq_f32(vepo);
// First refinement step: VRECPS computes (2 - vrepo * vepo); the multiply completes the iteration.
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
// Second refinement step via FMA: verepo = 1 - vrepo * vepo is the residual error of the estimate.
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float32x4_t vy = vmulq_f32(vemo, vrepo);
// Adjust reconstructed expm1(-2z) / (2 + expm1(-2z)) to match the correctly rounded division result
const float32x4_t vey = vfmsq_f32(vemo, vy, vepo);
vy = vfmaq_f32(vy, vey, vrepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
}
| 5,552
| 42.724409
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-neonfma-expm1minus-rr1-p6h5ts-nr2fma.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise over a buffer of floats using NEON+FMA intrinsics.
// n is the buffer size in bytes and must be a multiple of sizeof(float32x4_t).
// Variant: rr1 range reduction, degree-6 polynomial, reciprocal via Newton-Raphson
// (two FMA-based steps), no final residual adjustment.
void xnn_math_f32_tanh__neonfma_expm1minus_rr1_p6h5ts_nr2fma(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float32x4_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float32x4_t vsat_cutoff = vmovq_n_f32(0x1.205968p+3f);
const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp+22f);
const float32x4_t vln2 = vmovq_n_f32(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/4, log(2)/4]
const float32x4_t vc6 = vmovq_n_f32(0x1.6B7338p-4f);
const float32x4_t vc5 = vmovq_n_f32(-0x1.12278Ep-2f);
const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-1f);
const float32x4_t vc3 = vmovq_n_f32(-0x1.5554B0p+0f);
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp+0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const float32x4_t vone = vmovq_n_f32(1.0f);
// Mask for the sign bit.
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; n != 0; n -= sizeof(float32x4_t)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float32x4_t vz = vabsq_f32(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = vminq_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 1).
// We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
// Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
// 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
// Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
// Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t * (-p)
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
// = s * t * (-p) + (s - 1)
// = (s - 1) - (t * s) * p
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
// Use Newton-Raphson method (2 iterations, both via FMA) to compute the reciprocal of the denominator.
// Note: 1 < exp(-2z) + 1 <= 2, because z >= 0 and 0 < exp(-2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
float32x4_t vrepo = vrecpeq_f32(vepo);
// Each step: verepo = 1 - vrepo * vepo is the residual error; vrepo += vrepo * verepo refines the estimate.
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float32x4_t vy = vmulq_f32(vemo, vrepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
}
| 5,357
| 42.209677
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-neonfma-expm1minus-rr1-p6h5ts-nr2fmaadj.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise over a buffer of floats using NEON+FMA intrinsics.
// n is the buffer size in bytes and must be a multiple of sizeof(float32x4_t).
// Variant: rr1 range reduction, degree-6 polynomial, reciprocal via Newton-Raphson
// (two FMA-based steps), with a final residual adjustment ("adj").
void xnn_math_f32_tanh__neonfma_expm1minus_rr1_p6h5ts_nr2fmaadj(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float32x4_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float32x4_t vsat_cutoff = vmovq_n_f32(0x1.205968p+3f);
const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp+22f);
const float32x4_t vln2 = vmovq_n_f32(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/4, log(2)/4]
const float32x4_t vc6 = vmovq_n_f32(0x1.6B7338p-4f);
const float32x4_t vc5 = vmovq_n_f32(-0x1.12278Ep-2f);
const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-1f);
const float32x4_t vc3 = vmovq_n_f32(-0x1.5554B0p+0f);
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp+0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const float32x4_t vone = vmovq_n_f32(1.0f);
// Mask for the sign bit.
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; n != 0; n -= sizeof(float32x4_t)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float32x4_t vz = vabsq_f32(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = vminq_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 1).
// We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
// Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
// 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
// Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
// Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t * (-p)
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
// = s * t * (-p) + (s - 1)
// = (s - 1) - (t * s) * p
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
// Use Newton-Raphson method (2 iterations, both via FMA) to compute the reciprocal of the denominator.
// Note: 1 < exp(-2z) + 1 <= 2, because z >= 0 and 0 < exp(-2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
float32x4_t vrepo = vrecpeq_f32(vepo);
// Each step: verepo = 1 - vrepo * vepo is the residual error; vrepo += vrepo * verepo refines the estimate.
float32x4_t verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
verepo = vfmsq_f32(vone, vrepo, vepo);
vrepo = vfmaq_f32(vrepo, vrepo, verepo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float32x4_t vy = vmulq_f32(vemo, vrepo);
// Adjust reconstructed expm1(-2z) / (2 + expm1(-2z)) to match the correctly rounded division result
const float32x4_t vey = vfmsq_f32(vemo, vy, vepo);
vy = vfmaq_f32(vy, vey, vrepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
}
| 5,557
| 42.76378
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-neonfma-expm1minus-rr1-p6h5ts-nr2recps.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise over a buffer of floats using NEON+FMA intrinsics.
// n is the buffer size in bytes and must be a multiple of sizeof(float32x4_t).
// Variant: rr1 range reduction, degree-6 polynomial, reciprocal via Newton-Raphson
// (two RECPS-based steps), no final residual adjustment.
void xnn_math_f32_tanh__neonfma_expm1minus_rr1_p6h5ts_nr2recps(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float32x4_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float32x4_t vsat_cutoff = vmovq_n_f32(0x1.205968p+3f);
const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp+22f);
const float32x4_t vln2 = vmovq_n_f32(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/4, log(2)/4]
const float32x4_t vc6 = vmovq_n_f32(0x1.6B7338p-4f);
const float32x4_t vc5 = vmovq_n_f32(-0x1.12278Ep-2f);
const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-1f);
const float32x4_t vc3 = vmovq_n_f32(-0x1.5554B0p+0f);
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp+0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const float32x4_t vone = vmovq_n_f32(1.0f);
// Mask for the sign bit.
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; n != 0; n -= sizeof(float32x4_t)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float32x4_t vz = vabsq_f32(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = vminq_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 1).
// We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
// Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
// 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
// Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
// Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t * (-p)
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
// = s * t * (-p) + (s - 1)
// = (s - 1) - (t * s) * p
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
// Use Newton-Raphson method (2 iterations, both via RECPS) to compute the reciprocal of the denominator.
// Note: 1 < exp(-2z) + 1 <= 2, because z >= 0 and 0 < exp(-2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
float32x4_t vrepo = vrecpeq_f32(vepo);
// Each step: VRECPS computes (2 - vrepo * vepo); the multiply completes the iteration.
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float32x4_t vy = vmulq_f32(vemo, vrepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
}
| 5,337
| 42.048387
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-neonfma-expm1minus-rr1-p6h5ts-nr2recpsadj.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-neon-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise over a buffer of floats using NEON+FMA intrinsics.
// n is the buffer size in bytes and must be a multiple of sizeof(float32x4_t).
// Variant: rr1 range reduction, degree-6 polynomial, reciprocal via Newton-Raphson
// (two RECPS-based steps), with a final residual adjustment ("adj").
void xnn_math_f32_tanh__neonfma_expm1minus_rr1_p6h5ts_nr2recpsadj(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float32x4_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float32x4_t vsat_cutoff = vmovq_n_f32(0x1.205968p+3f);
const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp+22f);
const float32x4_t vln2 = vmovq_n_f32(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/4, log(2)/4]
const float32x4_t vc6 = vmovq_n_f32(0x1.6B7338p-4f);
const float32x4_t vc5 = vmovq_n_f32(-0x1.12278Ep-2f);
const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-1f);
const float32x4_t vc3 = vmovq_n_f32(-0x1.5554B0p+0f);
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp+0f);
const float32x4_t vtwo = vmovq_n_f32(2.0f);
const float32x4_t vone = vmovq_n_f32(1.0f);
// Mask for the sign bit.
const uint32x4_t vsign_mask = vmovq_n_u32(UINT32_C(0x80000000));
for (; n != 0; n -= sizeof(float32x4_t)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float32x4_t vz = vabsq_f32(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = vminq_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 1).
// We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
// Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
// 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
// Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float32x4_t vt = vfmaq_f32(vz, vn, vln2);
// Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t * (-p)
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmsq_f32(vtwo, vp, vt);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
// = s * t * (-p) + (s - 1)
// = (s - 1) - (t * s) * p
const float32x4_t vts = vmulq_f32(vt, vs);
const float32x4_t vsmo = vsubq_f32(vs, vone);
const float32x4_t vemo = vfmsq_f32(vsmo, vp, vts);
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float32x4_t vepo = vaddq_f32(vemo, vtwo);
// Use Newton-Raphson method (2 iterations, both via RECPS) to compute the reciprocal of the denominator.
// Note: 1 < exp(-2z) + 1 <= 2, because z >= 0 and 0 < exp(-2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
float32x4_t vrepo = vrecpeq_f32(vepo);
// Each step: VRECPS computes (2 - vrepo * vepo); the multiply completes the iteration.
float32x4_t verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
verepo = vrecpsq_f32(vrepo, vepo);
vrepo = vmulq_f32(vrepo, verepo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float32x4_t vy = vmulq_f32(vemo, vrepo);
// Adjust reconstructed expm1(-2z) / (2 + expm1(-2z)) to match the correctly rounded division result
const float32x4_t vey = vfmsq_f32(vemo, vy, vepo);
vy = vfmaq_f32(vy, vey, vrepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = vbslq_f32(vsign_mask, vx, vy);
vst1q_f32(output, vy); output += 4;
}
}
| 5,537
| 42.606299
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut16-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) elementwise over a buffer of floats using scalar code:
// rr1 range reduction, a 16-entry exp2 lookup table, a degree-3 polynomial, and
// an ordinary division for the tanh fraction. n is the buffer size in bytes and
// must be a multiple of sizeof(float).
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut16_p3h1ts_div(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(float) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const float vsat_cutoff = 0x1.205968p+3f;
const float vminus_log2e = -0x1.715476p+0f;
// Large number such that ulp(magic bias) == exp2(-5)
const float vmagic_bias = 0x1.800000p+18f;
// Mask for the lowest 4 bits
const uint32_t vindex_mask = UINT32_C(0xF);
const float vln2 = 0x1.62E430p-1f;
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
// on [-log(2)/64, log(2)/64]
const float vc3 = 0x1.55561Cp-1f;
const float vc2 = -0x1.0001ECp+0f;
const float vone = 1.0f;
const float vminus_two = -2.0f;
for (; n != 0; n -= sizeof(float)) {
const float vx = *input++;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
float vz = fabsf(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = math_pmin_f32(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 5).
// We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
// then subtracting the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float vn = vz * vminus_log2e + vmagic_bias;
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
// n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 4:12 into 23:31 (position of floating-point exponent).
const uint32_t vb = float_as_uint32(vn);
const uint32_t ve = vb << 19;
// Use the low 4 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
const uint32_t vidx = vb & vindex_mask;
const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
// Adjust exponent of the value l fetched from the table to get the final s value.
const float vs = uint32_as_float(vl + ve);
// Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
vn -= vmagic_bias;
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const float vt = vn * vln2 + vz;
// Compute degree-3 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
// P(t) = -2 * (t + t * (t * (c2 + t * c3)))
// = -2 * (t + t * p)
float vp = vc3 * vt + vc2;
vp *= vt;
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
// = s * (-2 * (t + t * p) + 1) - 1
// = (s - 1) - 2 * ((t * s) + (t * s) * p)
const float vts = vt * vs;
const float vsmo = vs - vone;
vp = vp * vts + vts;
const float vemo = vp * vminus_two + vsmo;
// Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
const float vepo = vemo - vminus_two;
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
float vy = vemo / vepo;
// Reconstruct tanh(x) = copysign(y, x)
vy = copysignf(vy, vx);
*output++ = vy;
}
}
| 5,142
| 40.475806
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut16-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut16_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). Reconstruction: y = expm1(-2|x|) / (expm1(-2|x|) + 2) with
  // the sign of x restored via copysignf; the scale 2**(2n) comes from a
  // 16-entry exp2 table, expm1 from a degree-4 polynomial, and the final
  // fraction from a true division.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = -0x1.55563Ap-2f;
  const float vc3 = 0x1.555708p-1f;
  const float vc2 = -0x1.000000p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 5 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,236
| 40.563492
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut16-p4h2ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut16_p4h2ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). Same algorithm as the ..._lut16_p4h2ts_div variant, except
  // the final fraction is computed as emo * (1 / epo) (explicit reciprocal
  // then multiply) rather than a single division — this models targets where
  // the reciprocal is a separate instruction.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = -0x1.55563Ap-2f;
  const float vc3 = 0x1.555708p-1f;
  const float vc2 = -0x1.000000p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 5 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,317
| 40.224806
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut16-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut16_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). This "p4h3ps" variant folds the -2 into the polynomial
  // (P(t) = t * (-2 + t * (...))) and reconstructs expm1 as (s - 1) + (p*s)*t,
  // i.e. the polynomial is scaled by s before the final fused step.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = -0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 5 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,164
| 40.32
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut16-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut16_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). This "p4h3ts" variant folds the -2 into the polynomial and
  // reconstructs expm1 as (s - 1) + (t*s) * p, i.e. the scale s is applied to
  // t (not to p) before the final fused step — same math as the h3ps variant,
  // different rounding behavior.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = -0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 5 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,164
| 40.32
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut32-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 32) values decremented (as integer) by (k << 18), k = 0..31
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_32[32];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut32_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). Uses a 32-entry exp2 table (6 fractional bits of n, so a
  // narrower reduced range [-log(2)/128, log(2)/128]) which allows a cheaper
  // degree-3 polynomial; final fraction via true division.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-6)
  const float vmagic_bias = 0x1.800000p+17f;
  // Mask for the lowest 5 bits
  const uint32_t vindex_mask = UINT32_C(0x1F);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/128, log(2)/128]
  const float vc3 = 0x1.555582p-1f;
  const float vc2 = -0x1.00007Ap+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 6).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 6 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**16, i.e. |z| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 6 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 5 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 5:13 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 18;
    // Use bits 0:5 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_32[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 6) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-3 polynomial approximation for exp(-2t) - 1 on [-log(2)/128, log(2)/128].
    //   P(t) = -2 * (t + t * (t * (c2 + t * c3)))
    //        = -2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,148
| 40.524194
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut4-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut4_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). Uses a small 4-entry exp2 table (3 fractional bits of n,
  // so a wider reduced range [-log(2)/16, log(2)/16]) with a degree-4
  // polynomial; final fraction via true division.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p+20f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = -0x1.554F9Ap-2f;
  const float vc3 = 0x1.557082p-1f;
  const float vc2 = -0x1.000002p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 3).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 3 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**19, i.e. |z| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 3 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 21;
    // Use bits 0:2 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_4[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 3) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,229
| 40.507937
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut4-p4h2ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut4_p4h2ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  // Evaluates tanhf(input[i]) for each of the n / sizeof(float) elements and
  // writes the results to output. n is a BYTE count and must be a multiple of
  // sizeof(float). Same algorithm as the ..._lut4_p4h2ts_div variant, except
  // the final fraction is computed as emo * (1 / epo) (explicit reciprocal
  // then multiply) rather than a single division.
  // NOTE: auto-generated kernel — the exact floating-point operation order is
  // part of the accuracy contract; do not reassociate.
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p+20f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = -0x1.554F9Ap-2f;
  const float vc3 = 0x1.557082p-1f;
  const float vc2 = -0x1.000002p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 3).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 3 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**19, i.e. |z| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 3 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 21;
    // Use bits 0:2 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_4[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(-z / log(2), 3) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;
    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,310
| 40.170543
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut4-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut4_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 3 fractional bits in float.
  const float magic_bias = 0x1.800000p+20f;
  // The low 2 bits of the rounded value index the 4-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x3);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/16, log(2)/16]:
  //   expm1(-2t) ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = -0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 3 fractional bits) via the magic-bias trick.
    // Valid because |z| <= sat_cutoff keeps |-z/log(2)| far below 2**19.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 2 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 21;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p = -2 + t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // expm1(-2z) = s * t * p + (s - 1) = (s - 1) + (p * s) * t
    const float ps = p * s;
    const float smo = s - one;
    const float emo = t * ps + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2); then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,157
| 40.264
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut4-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut4_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 3 fractional bits in float.
  const float magic_bias = 0x1.800000p+20f;
  // The low 2 bits of the rounded value index the 4-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x3);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/16, log(2)/16]:
  //   expm1(-2t) ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = -0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 3 fractional bits) via the magic-bias trick.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 2 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 21;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p = -2 + t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // expm1(-2z) = s * t * p + (s - 1) = (s - 1) + (t * s) * p
    const float ts = t * s;
    const float smo = s - one;
    const float emo = p * ts + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2); then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,157
| 40.264
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut64-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut64_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 7 fractional bits in float.
  const float magic_bias = 0x1.800000p+16f;
  // The low 6 bits of the rounded value index the 64-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x3F);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/256, log(2)/256]:
  //   expm1(-2t) ~ -2 * (t + t * (t * (c2 + t * c3)))
  const float c3 = 0x1.55555Ep-1f;
  const float c2 = -0x1.00001Ep+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 7 fractional bits) via the magic-bias trick.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 6 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 17;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_64[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Evaluate p = t * (c2 + t * c3).
    float p = c3 * t + c2;
    p *= t;

    // expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float ts = t * s;
    const float smo = s - one;
    p = p * ts + ts;
    const float emo = p * minus_two + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2); then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,149
| 40.532258
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 4 fractional bits in float.
  const float magic_bias = 0x1.800000p+19f;
  // The low 3 bits of the rounded value index the 8-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x7);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/32, log(2)/32]:
  //   expm1(-2t) ~ -2 * (t + t * (t * (c2 + t * c3)))
  const float c3 = 0x1.555862p-1f;
  const float c2 = -0x1.0007ACp+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 4 fractional bits) via the magic-bias trick.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 3 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 20;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_8[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Evaluate p = t * (c2 + t * c3).
    float p = c3 * t + c2;
    p *= t;

    // expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float ts = t * s;
    const float smo = s - one;
    p = p * ts + ts;
    const float emo = p * minus_two + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2); then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,136
| 40.427419
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 4 fractional bits in float.
  const float magic_bias = 0x1.800000p+19f;
  // The low 3 bits of the rounded value index the 8-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x7);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/32, log(2)/32]:
  //   expm1(-2t) ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  const float c4 = -0x1.5558ECp-2f;
  const float c3 = 0x1.555C20p-1f;
  const float c2 = -0x1.000000p+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 4 fractional bits) via the magic-bias trick.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 3 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 20;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_8[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p = t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p *= t;

    // expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float ts = t * s;
    const float smo = s - one;
    p = p * ts + ts;
    const float emo = p * minus_two + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2); then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,230
| 40.515873
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h2ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h2ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Saturation threshold: tanhf(-z) == -1.0f for every z at or above this value.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic bias: adding it rounds -z/log(2) to 4 fractional bits in float.
  const float magic_bias = 0x1.800000p+19f;
  // The low 3 bits of the rounded value index the 8-entry exp2 table.
  const uint32_t index_mask = UINT32_C(0x7);
  const float ln2 = 0x1.62E430p-1f;
  // Minimax polynomial for expm1(-2t) on [-log(2)/32, log(2)/32]:
  //   expm1(-2t) ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  const float c4 = -0x1.5558ECp-2f;
  const float c3 = 0x1.555C20p-1f;
  const float c2 = -0x1.000000p+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y = expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transplant the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clip z at the saturation cutoff so large inputs land exactly on +-1.0f;
    // NaN propagates unchanged through math_pmin_f32.
    z = math_pmin_f32(z, sat_cutoff);

    // k := round(-z / log(2), 4 fractional bits) via the magic-bias trick.
    float k = z * minus_log2e + magic_bias;

    // Build scale s = 2**(2k): the table supplies 2**frac(2k) from the low
    // 3 bits of k; the remaining bits shift into the float exponent field.
    const uint32_t kbits = float_as_uint32(k);
    const uint32_t ebits = kbits << 20;
    const uint32_t idx = kbits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_8[idx];
    const float s = uint32_as_float(lut + ebits);

    // Undo the magic bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p = t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p *= t;

    // expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float ts = t * s;
    const float smo = s - one;
    p = p * ts + ts;
    const float emo = p * minus_two + smo;

    // Denominator: expm1(-2z) + 2 == exp(-2z) + 1.
    const float epo = emo - minus_two;

    // "rcp" variant: form the reciprocal of the denominator explicitly,
    // then multiply (mirrors the hardware-reciprocal vector kernels).
    const float repo = one / epo;

    // y = expm1(-2z) * (1 / (expm1(-2z) + 2)); then restore the sign of x.
    float y = emo * repo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,311
| 40.178295
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial (p4) with (p*s)*t reconstruction order (h3ps), true division (div).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 low bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,158
| 40.272
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h3ps-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial (p4) with (p*s)*t reconstruction order (h3ps), and an explicit
// reciprocal-then-multiply in place of a direct division (rcp).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h3ps_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 low bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,239
| 39.9375
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial (p4) with (t*s)*p reconstruction order (h3ts), true division (div).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 low bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,158
| 40.272
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-lut8-p4h3ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial (p4) with (t*s)*p reconstruction order (h3ts), and an explicit
// reciprocal-then-multiply in place of a direct division (rcp).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_lut8_p4h3ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 low bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,239
| 39.9375
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-p6h4ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), table-free degree-6 polynomial (p6)
// with halved coefficients and a final *(-2) reconstruction (h4ts), true division (div).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_p6h4ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = -0x1.6B7338p-5f;
  const float vc5 = 0x1.12278Ep-3f;
  const float vc4 = -0x1.555716p-2f;
  const float vc3 = 0x1.5554B0p-1f;
  const float vc2 = -0x1.FFFFFEp-1f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
    //        = -2 * (t + t * p)
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,373
| 38.405405
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-p6h5ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), table-free degree-6 polynomial (p6)
// with (p*s)*t reconstruction order (h5ps), true division (div).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_p6h5ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = -0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = -0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,301
| 38.109091
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-p6h5ps-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar tanh evaluation via the expm1(-2z) identity:
//   tanh(x) = expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored at the end.
// Variant: single-step range reduction (rr1), table-free degree-6 polynomial (p6)
// with (p*s)*t reconstruction order (h5ps), and an explicit reciprocal-then-multiply
// in place of a direct division (rcp).
//
// n      - number of input BYTES; must be a multiple of sizeof(float).
// input  - pointer to n/sizeof(float) input elements.
// output - pointer to n/sizeof(float) output elements.
void xnn_math_f32_tanh__scalar_expm1minus_rr1_p6h5ps_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vln2 = 0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = -0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = -0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const float vt = vn * vln2 + vz;

    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,382
| 37.787611
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1minus_rr1_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // tanhf(-z) is saturated at -1.0f for every z at or above this threshold (~9.010913).
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic constant with ulp == 0.5 and value === 63.5 mod 2**21: adding it rounds
  // to 1 fractional bit and folds in the floating-point exponent bias (127).
  const float magic_bias = 0x1.8000FEp+22f;
  const float ln2 = 0x1.62E430p-1f;
  // Degree-6 minimax polynomial for exp(-2t) - 1 on [-log(2)/4, log(2)/4]:
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  const float c6 = 0x1.6B7338p-4f;
  const float c5 = -0x1.12278Ep-2f;
  const float c4 = 0x1.555716p-1f;
  const float c3 = -0x1.5554B0p+0f;
  const float c2 = 0x1.FFFFFEp+0f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transfer the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clamp z at sat_cutoff: beyond it tanhf(-z) == -1.0f, and this implementation
    // produces exactly -1.0f at the cutoff itself. NaN inputs pass through pmin.
    z = math_pmin_f32(z, sat_cutoff);

    // r := round(-z / log(2), 1) via the magic-bias trick. The trick is valid for
    // |z / log(2)| <= 2**21, far above the clamped range, and cannot overflow here.
    float r = z * minus_log2e + magic_bias;
    // Build the scale s == 2**(2r) by sliding r's single fractional bit into the
    // exponent field. For 0 <= z <= 9.010913 we get -13 <= r <= 0, so s stays normal.
    const float s = uint32_as_float(float_as_uint32(r) << 23);
    // Remove the magic bias to recover r as an ordinary float.
    r -= magic_bias;

    // Reduced argument t := z + r * log(2); note that -t = -z - r * log(2).
    const float t = r * ln2 + z;

    // Horner evaluation of the polynomial p with t * (... ) factored out:
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t * p
    float p = c6 * t + c5;
    p = p * t + c4;
    p = p * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // Reconstruct expm1(-2z):
    //   exp(-2z) - 1 = s * t * p + (s - 1) = (s - 1) + (t * s) * p
    const float ts = t * s;
    const float sm1 = s - one;
    const float em = p * ts + sm1;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep = em - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then tanh(x) = copysign(y, x).
    const float y = em / ep;
    *output++ = copysignf(y, x);

    n -= sizeof(float);
  }
}
| 4,301
| 38.109091
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr1-p6h5ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1minus_rr1_p6h5ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // tanhf(-z) is saturated at -1.0f for every z at or above this threshold (~9.010913).
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Magic constant with ulp == 0.5 and value === 63.5 mod 2**21: adding it rounds
  // to 1 fractional bit and folds in the floating-point exponent bias (127).
  const float magic_bias = 0x1.8000FEp+22f;
  const float ln2 = 0x1.62E430p-1f;
  // Degree-6 minimax polynomial for exp(-2t) - 1 on [-log(2)/4, log(2)/4]:
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  const float c6 = 0x1.6B7338p-4f;
  const float c5 = -0x1.12278Ep-2f;
  const float c4 = 0x1.555716p-1f;
  const float c3 = -0x1.5554B0p+0f;
  const float c2 = 0x1.FFFFFEp+0f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd, so evaluate y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|
    // and transfer the sign of x onto the result at the end.
    float z = fabsf(x);
    // Clamp z at sat_cutoff: beyond it tanhf(-z) == -1.0f, and this implementation
    // produces exactly -1.0f at the cutoff itself. NaN inputs pass through pmin.
    z = math_pmin_f32(z, sat_cutoff);

    // r := round(-z / log(2), 1) via the magic-bias trick. The trick is valid for
    // |z / log(2)| <= 2**21, far above the clamped range, and cannot overflow here.
    float r = z * minus_log2e + magic_bias;
    // Build the scale s == 2**(2r) by sliding r's single fractional bit into the
    // exponent field. For 0 <= z <= 9.010913 we get -13 <= r <= 0, so s stays normal.
    const float s = uint32_as_float(float_as_uint32(r) << 23);
    // Remove the magic bias to recover r as an ordinary float.
    r -= magic_bias;

    // Reduced argument t := z + r * log(2); note that -t = -z - r * log(2).
    const float t = r * ln2 + z;

    // Horner evaluation of the polynomial p with t * (... ) factored out:
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t * p
    float p = c6 * t + c5;
    p = p * t + c4;
    p = p * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // Reconstruct expm1(-2z):
    //   exp(-2z) - 1 = s * t * p + (s - 1) = (s - 1) + (t * s) * p
    const float ts = t * s;
    const float sm1 = s - one;
    const float em = p * ts + sm1;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep = em - minus_two;

    // Explicit reciprocal of the denominator (rcp variant), then the fraction
    // y = expm1(-2z) * (1 / (expm1(-2z) + 2)), then tanh(x) = copysign(y, x).
    const float recip = one / ep;
    const float y = em * recip;
    *output++ = copysignf(y, x);

    n -= sizeof(float);
  }
}
| 4,382
| 37.787611
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut16-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for each float in the input array and writes the results to
// the output array. The argument n is the array size in BYTES and must be a
// multiple of sizeof(float).
//
// Variant: rr2 (Cody-Waite two-constant) range reduction, 16-entry exp2 lookup
// table, degree-3 polynomial, final division.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut16_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/64, log(2)/64]
  const float vc3 = 0x1.55561Cp-1f;
  const float vc2 = -0x1.0001ECp+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because
    // inputs x outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-3 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = -2 * (t + t * (t * (c2 + t * c3)))
    //        = -2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,346
| 40.773438
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut16-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for each float in the input array and writes the results to
// the output array. The argument n is the array size in BYTES and must be a
// multiple of sizeof(float).
//
// Variant: rr2 (Cody-Waite two-constant) range reduction, 16-entry exp2 lookup
// table, degree-4 polynomial, final division.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut16_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = -0x1.55563Ap-2f;
  const float vc3 = 0x1.555708p-1f;
  const float vc2 = -0x1.000000p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because
    // inputs x outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,440
| 40.853846
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut16-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for each float in the input array and writes the results to
// the output array. The argument n is the array size in BYTES and must be a
// multiple of sizeof(float).
//
// Variant: rr2 (Cody-Waite two-constant) range reduction, 16-entry exp2 lookup
// table, degree-4 polynomial reconstructed via (p * s) * t, final division.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut16_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = -0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because
    // inputs x outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,368
| 40.620155
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut16-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for each float in the input array and writes the results to
// the output array. The argument n is the array size in BYTES and must be a
// multiple of sizeof(float).
//
// Variant: rr2 (Cody-Waite two-constant) range reduction, 16-entry exp2 lookup
// table, degree-4 polynomial reconstructed via (t * s) * p, final division.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut16_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = -0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because
    // inputs x outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,368
| 40.620155
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut32-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 32) values decremented (as integer) by (k << 18), k = 0..31
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_32[32];
// Computes tanhf(x) for each float in the input array and writes the results to
// the output array. The argument n is the array size in BYTES and must be a
// multiple of sizeof(float).
//
// Variant: rr2 (Cody-Waite two-constant) range reduction, 32-entry exp2 lookup
// table, degree-3 polynomial, final division.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut32_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-6)
  const float vmagic_bias = 0x1.800000p+17f;
  // Mask for the lowest 5 bits
  const uint32_t vindex_mask = UINT32_C(0x1F);
  // Last 9 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/128, log(2)/128]
  const float vc3 = 0x1.555582p-1f;
  const float vc2 = -0x1.00007Ap+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 6).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 6 fractional bits,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**16, i.e. |z| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because
    // inputs x outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 6 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 5 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 5:13 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 18;
    // Use bits 0:5 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_32[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 6) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-3 polynomial approximation for exp(-2t) - 1 on [-log(2)/128, log(2)/128].
    //   P(t) = -2 * (t + t * (t * (c2 + t * c3)))
    //        = -2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,352
| 40.820313
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut4-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut4_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-3), so adding it rounds to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Low 2 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x3);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 6 mantissa bits zero.
  const float ln2_hi = 0x1.62E400p-1f;
  const float ln2_lo = 0x1.7F7D1Cp-20f;
  // Degree-4 polynomial: exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/16, log(2)/16].
  const float c4 = -0x1.554F9Ap-2f;
  const float c3 = 0x1.557082p-1f;
  const float c2 = -0x1.000002p+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 3) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**19, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 4-entry table
    // using the low 2 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 21;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p *= t;

    // Reconstruct expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float s_minus_one = s - one;
    p = p * ts + ts;
    const float em1 = p * minus_two + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,433
| 40.8
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut4-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut4_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-3), so adding it rounds to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Low 2 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x3);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 6 mantissa bits zero.
  const float ln2_hi = 0x1.62E400p-1f;
  const float ln2_lo = 0x1.7F7D1Cp-20f;
  // Degree-4 polynomial: exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16].
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = -0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 3) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**19, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 4-entry table
    // using the low 2 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 21;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := -2 + t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // Reconstruct expm1(-2z) = (s - 1) + (p * s) * t.
    const float ps = p * s;
    const float s_minus_one = s - one;
    const float em1 = t * ps + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 40.565891
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut4-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut4_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-3), so adding it rounds to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Low 2 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x3);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 6 mantissa bits zero.
  const float ln2_hi = 0x1.62E400p-1f;
  const float ln2_lo = 0x1.7F7D1Cp-20f;
  // Degree-4 polynomial: exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16].
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = -0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float minus_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 3) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**19, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 4-entry table
    // using the low 2 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 21;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := -2 + t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + minus_two;

    // Reconstruct expm1(-2z) = (s - 1) + (t * s) * p.
    const float ts = t * s;
    const float s_minus_one = s - one;
    const float em1 = p * ts + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,361
| 40.565891
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut64-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut64_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-7), so adding it rounds to 7 fractional bits.
  const float magic_bias = 0x1.800000p+16f;
  // Low 6 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x3F);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 10 mantissa bits zero.
  const float ln2_hi = 0x1.62E800p-1f;
  const float ln2_lo = -0x1.E8082Ep-16f;
  // Degree-3 polynomial: exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/256, log(2)/256].
  const float c3 = 0x1.55555Ep-1f;
  const float c2 = -0x1.00001Ep+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 7) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**15, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 64-entry table
    // using the low 6 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 17;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_64[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := t * (c2 + t * c3).
    float p = c3 * t + c2;
    p *= t;

    // Reconstruct expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float s_minus_one = s - one;
    p = p * ts + ts;
    const float em1 = p * minus_two + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,355
| 40.84375
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-4), so adding it rounds to 4 fractional bits.
  const float magic_bias = 0x1.800000p+19f;
  // Low 3 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x7);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 7 mantissa bits zero.
  const float ln2_hi = 0x1.62E400p-1f;
  const float ln2_lo = 0x1.7F7D1Cp-20f;
  // Degree-3 polynomial: exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/32, log(2)/32].
  const float c3 = 0x1.555862p-1f;
  const float c2 = -0x1.0007ACp+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 4) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**18, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 8-entry table
    // using the low 3 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 20;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_8[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := t * (c2 + t * c3).
    float p = c3 * t + c2;
    p *= t;

    // Reconstruct expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float s_minus_one = s - one;
    p = p * ts + ts;
    const float em1 = p * minus_two + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,340
| 40.726563
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation bound: tanhf(-z) == -1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float minus_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(magic_bias) == exp2(-4), so adding it rounds to 4 fractional bits.
  const float magic_bias = 0x1.800000p+19f;
  // Low 3 bits of the rounded multiple select the lookup-table entry.
  const uint32_t index_mask = UINT32_C(0x7);
  // Two-constant Cody-Waite split of log(2); the hi part has its last 7 mantissa bits zero.
  const float ln2_hi = 0x1.62E400p-1f;
  const float ln2_lo = 0x1.7F7D1Cp-20f;
  // Degree-4 polynomial: exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/32, log(2)/32].
  const float c4 = -0x1.5558ECp-2f;
  const float c3 = 0x1.555C20p-1f;
  const float c2 = -0x1.000000p+0f;
  const float one = 1.0f;
  const float minus_two = -2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(-2z) / (2 + expm1(-2z)) on z = |x|,
    // then transfer the sign of x onto the result.
    float z = fabsf(x);

    // Clip z at the saturation threshold; tanhf(sat_cutoff) == -1.0f in this
    // implementation, and NaN inputs pass through unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // Reduced argument k := round(-z / log(2), 4) via the magic-bias trick.
    // Valid for |z / log(2)| <= 2**18, which covers the whole clipped range.
    float k = z * minus_log2e + magic_bias;

    // Build the scale s := 2**(2k): fetch 2**frac(2k) from the 8-entry table
    // using the low 3 bits of k, then add int(2k) into the exponent field.
    const uint32_t k_bits = float_as_uint32(k);
    const uint32_t exp_bits = k_bits << 20;
    const uint32_t idx = k_bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_8[idx];
    const float s = uint32_as_float(lut + exp_bits);

    // Remove the bias to recover k as an ordinary float.
    k -= magic_bias;

    // Reduced argument t := z + k * log(2), Cody-Waite style for accuracy.
    float t = k * ln2_hi + z;
    t = k * ln2_lo + t;

    // Horner evaluation of p := t * (c2 + t * (c3 + t * c4)).
    float p = c4 * t + c3;
    p = p * t + c2;
    p *= t;

    // Reconstruct expm1(-2z) = (s - 1) - 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float s_minus_one = s - one;
    p = p * ts + ts;
    const float em1 = p * minus_two + s_minus_one;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2.
    const float ep1 = em1 - minus_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,434
| 40.807692
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h2ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial (p4), and an explicitly computed reciprocal of the
// denominator (rcp) instead of a direct division.
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h2ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = -0x1.5558ECp-2f;
  const float vc3 = 0x1.555C20p-1f;
  const float vc2 = -0x1.000000p+0f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 lowest bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = -2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,515
| 40.473684
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial with the -2 folded into the polynomial and the
// (p * s) product formed first (p4h3ps), and a direct division (div).
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 lowest bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,362
| 40.573643
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h3ps-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial with the -2 folded into the polynomial and the
// (p * s) product formed first (p4h3ps), and an explicitly computed
// reciprocal of the denominator (rcp) instead of a direct division.
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h3ps_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 lowest bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,443
| 40.242424
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial with the -2 folded into the polynomial and the
// (t * s) product formed first (p4h3ts), and a direct division (div).
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 lowest bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,362
| 40.573643
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-lut8-p4h3ts-rcp.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), 8-entry exp2 lookup table (lut8),
// degree-4 polynomial with the -2 folded into the polynomial and the
// (t * s) product formed first (p4h3ts), and an explicitly computed
// reciprocal of the denominator (rcp) instead of a direct division.
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_lut8_p4h3ts_rcp(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vln2_hi = 0x1.62E400p-1f;
  const float vln2_lo = 0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = -0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the 3 lowest bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Compute reciprocal of denominator.
    const float vrepo = vone / vepo;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo * vrepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,443
| 40.242424
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-p6h4ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), no lookup table, degree-6 polynomial
// with the -2 kept outside the polynomial and the (t * s) product formed
// first (p6h4ts), and a direct division (div).
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_p6h4ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vln2_hi = 0x1.62E420p-1f;
  const float vln2_lo = 0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ -2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = -0x1.6B7338p-5f;
  const float vc5 = 0x1.12278Ep-3f;
  const float vc4 = -0x1.555716p-2f;
  const float vc3 = 0x1.5554B0p-1f;
  const float vc2 = -0x1.FFFFFEp-1f;
  const float vone = 1.0f;
  const float vminus_two = -2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = -2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
    //        = -2 * (t + t * p)
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (-2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) + 1) - 1
    //                = s * (-2 * (t + t * p) + 1) - 1
    //                = (s - 1) - 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vminus_two + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,577
| 38.808696
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-p6h5ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Evaluates tanhf element-wise on an array of floats.
//
// Variant: scalar code, reconstruction through expm1(-2z), two-constant
// Cody-Waite range reduction (rr2), no lookup table, degree-6 polynomial
// with the -2 folded into the polynomial and the (p * s) product formed
// first (p6h5ps), and a direct division (div).
//
// n      - size of the input and output arrays in BYTES; must be a multiple
//          of sizeof(float).
// input  - array of n / sizeof(float) input values.
// output - array receiving tanhf of each corresponding input value.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_p6h5ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vln2_hi = 0x1.62E420p-1f;
  const float vln2_lo = 0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = -0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = -0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;

    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;

    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;

    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,505
| 38.526316
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1minus-rr2-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(-2z) / (expm1(-2z) + 2) using a degree-6
// polynomial with Cody-Waite (two-constant) range reduction, then copies the
// sign of x onto y. Differs from the "p6h5ps" variant only in the
// reconstruction order ((t * s) * p instead of (p * s) * t).
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1minus_rr2_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vminus_log2e = -0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vln2_hi = 0x1.62E420p-1f;
  const float vln2_lo = 0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = -0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = -0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vminus_two = -2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vminus_log2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);
    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vln2_hi + vz;
    vt = vn * vln2_lo + vt;
    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vminus_two;
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(-2z) + 1 = expm1(-2z) + 2
    const float vepo = vemo - vminus_two;
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 4,505
| 38.526316
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut16-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(2z) / (expm1(2z) + 2) using a 16-entry
// exp2 lookup table combined with a degree-3 polynomial, then copies the sign
// of x onto y. "div" variant: the final fraction uses a true division.
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut16_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/64, log(2)/64]
  const float vc3 = 0x1.55561Cp-1f;
  const float vc2 = 0x1.0001ECp+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,063
| 39.83871
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut16-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(2z) / (expm1(2z) + 2) using a 16-entry
// exp2 lookup table combined with a degree-4 polynomial, then copies the sign
// of x onto y. "div" variant: the final fraction uses a true division.
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut16_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-2f;
  const float vc3 = 0x1.555708p-1f;
  const float vc2 = 0x1.000000p+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,156
| 39.928571
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut16-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(2z) / (expm1(2z) + 2) using a 16-entry
// exp2 lookup table combined with a degree-4 polynomial, then copies the sign
// of x onto y. "ps" reconstruction order: (s - 1) + (p * s) * t.
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut16_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = 0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,088
| 39.712
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut16-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(2z) / (expm1(2z) + 2) using a 16-entry
// exp2 lookup table combined with a degree-4 polynomial, then copies the sign
// of x onto y. "ts" reconstruction order: (s - 1) + (t * s) * p.
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut16_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = 0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,088
| 39.712
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut32-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 32) values decremented (as integer) by (k << 18), k = 0..31
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_32[32];
// Computes tanhf(x) for every float in the input buffer.
// For z = |x| it evaluates y := expm1(2z) / (expm1(2z) + 2) using a 32-entry
// exp2 lookup table combined with a degree-3 polynomial, then copies the sign
// of x onto y. "div" variant: the final fraction uses a true division.
//
// n      - size of the input/output buffers in bytes; must be a multiple of sizeof(float).
// input  - pointer to n / sizeof(float) input elements.
// output - pointer to the buffer receiving one tanhf result per input element.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut32_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-6)
  const float vmagic_bias = 0x1.800000p+17f;
  // Mask for the lowest 5 bits
  const uint32_t vindex_mask = UINT32_C(0x1F);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/128, log(2)/128]
  const float vc3 = 0x1.555582p-1f;
  const float vc2 = 0x1.00007Ap+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 6).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 6 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**16, i.e. |z| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 6 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 5 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 5:13 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 18;
    // Use bits 0:5 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_32[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 6) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/128, log(2)/128].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,069
| 39.887097
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut4-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), a 4-entry exp2 lookup table, a degree-4
// polynomial with the "h2ts" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut4_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p+20f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = 0x1.554F9Ap-2f;
  const float vc3 = 0x1.557082p-1f;
  const float vc2 = 0x1.000002p+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**19, i.e. |z| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 3 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 21;
    // Use bits 0:2 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_4[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 3) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,149
| 39.873016
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut4-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), a 4-entry exp2 lookup table, a degree-4
// polynomial with the "h3ps" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut4_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p+20f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = 0x1.554F9Ap-1f;
  const float vc3 = 0x1.557082p+0f;
  const float vc2 = 0x1.000002p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**19, i.e. |z| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 3 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 21;
    // Use bits 0:2 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_4[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 3) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,081
| 39.656
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut4-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), a 4-entry exp2 lookup table, a degree-4
// polynomial with the "h3ts" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut4_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p+20f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = 0x1.554F9Ap-1f;
  const float vc3 = 0x1.557082p+0f;
  const float vc2 = 0x1.000002p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**19, i.e. |z| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 3 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 21;
    // Use bits 0:2 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_4[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 3) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,081
| 39.656
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut64-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), a 64-entry exp2 lookup table, a degree-3
// polynomial with the "h1ts" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut64_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-7)
  const float vmagic_bias = 0x1.800000p+16f;
  // Mask for the lowest 6 bits
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/256, log(2)/256]
  const float vc3 = 0x1.55555Ep-1f;
  const float vc2 = 0x1.00001Ep+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 7).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 7 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**15, i.e. |z| <= 0x1.62E43p+14 = 22713.046875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 7 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 6 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 6:14 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 17;
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_64[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 7) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/256, log(2)/256].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,070
| 39.895161
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut8-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), an 8-entry exp2 lookup table, a degree-3
// polynomial with the "h1ts" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut8_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/32, log(2)/32]
  const float vc3 = 0x1.555862p-1f;
  const float vc2 = 0x1.0007ACp+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,057
| 39.790323
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut8-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), an 8-entry exp2 lookup table, a degree-4
// polynomial with the "h2ts" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut8_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-2f;
  const float vc3 = 0x1.555C20p-1f;
  const float vc2 = 0x1.000000p+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,150
| 39.880952
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Scalar tanh(x) kernel: computes tanh via expm1(2z)/(expm1(2z)+2) with z = |x|,
// using single range reduction (rr1), an 8-entry exp2 lookup table, a degree-4
// polynomial with the "h3ps" reconstruction, and a division for the final fraction.
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = 0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(n) <= 13, and thus the adjusted exponent is not
    //    greater than 13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,082
| 39.664
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-lut8-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1plus_rr1_lut8_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = 0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;
    // Use the low 3 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,082
| 39.664
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-p6h4ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr1_p6h4ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-5f;
  const float vc5 = 0x1.12278Ep-3f;
  const float vc4 = 0x1.555716p-2f;
  const float vc3 = 0x1.5554B0p-1f;
  const float vc2 = 0x1.FFFFFEp-1f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);
    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
    //        = 2 * (t + t * p)
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 4,292
| 37.675676
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-p6h5ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr1_p6h5ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = 0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = 0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);
    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 4,224
| 37.409091
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr1-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr1_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  const float vminus_ln2 = -0x1.62E430p-1f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = 0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = 0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);
    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    const float vt = vn * vminus_ln2 + vz;
    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 4,224
| 37.409091
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut16-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut16_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/64, log(2)/64]
  const float vc3 = 0x1.55561Cp-1f;
  const float vc2 = 0x1.0001ECp+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use the low 4 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;
    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,280
| 40.257813
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut16-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut16_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-2f;
  const float vc3 = 0x1.555708p-1f;
  const float vc2 = 0x1.000000p+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use the low 4 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,373
| 40.338462
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut16-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut16_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);
  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-5)
  const float vmagic_bias = 0x1.800000p+18f;
  // Mask for the lowest 4 bits
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Last 8 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/64, log(2)/64]
  const float vc4 = 0x1.55563Ap-1f;
  const float vc3 = 0x1.555708p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;
  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);
    // The function saturates at +1 for large positive arguments: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 5).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 5 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**17, i.e. |z| <= 0x1.62E43p+16 = 90852.1875), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 5 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 19;
    // Use the low 4 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_16[vidx];
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);
    // Subtract the large number back to get final n := round(z / log(2), 5) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/64, log(2)/64].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;
    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;
    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);
    *output++ = vy;
  }
}
| 5,305
| 40.131783
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut16-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 16-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-4 polynomial.
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut16_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-5: adding it rounds a float to 5 fractional bits.
  const float magic_bias = 0x1.800000p+18f;
  // Selects the low 4 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0xF);
  // -log(2) split into a high part (8 trailing zero bits) and a low correction term.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Degree-4 polynomial coefficients:
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))  on [-log(2)/64, log(2)/64]
  const float c4 = 0x1.55563Ap-1f;
  const float c3 = 0x1.555708p+0f;
  const float c2 = 0x1.000000p+1f;
  const float two = 2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 5) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**17, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 4 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 19;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_16[idx];
    // Table entries are pre-decremented (as integers) by (k << 19), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 5) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Horner evaluation of p, where exp(2t) - 1 ~= t * p.
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + two;

    // expm1(2z) = s * t * p + (s - 1) = (s - 1) + (t * s) * p.
    const float ts = t * s;
    const float sm1 = s - one;
    const float em1 = p * ts + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,305
| 40.131783
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut32-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 32) values decremented (as integer) by (k << 18), k = 0..31
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_32[32];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 32-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-3 polynomial.
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut32_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-6: adding it rounds a float to 6 fractional bits.
  const float magic_bias = 0x1.800000p+17f;
  // Selects the low 5 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0x1F);
  // -log(2) split into a high part (9 trailing zero bits) and a low correction term.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Degree-3 polynomial coefficients:
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))  on [-log(2)/128, log(2)/128]
  const float c3 = 0x1.555582p-1f;
  const float c2 = 0x1.00007Ap+0f;
  const float one = 1.0f;
  const float two = 2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 6) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**16, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 5 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 18;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_32[idx];
    // Table entries are pre-decremented (as integers) by (k << 18), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 6) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Evaluate p, where exp(2t) - 1 ~= 2 * (t + t * p).
    float p = c3 * t + c2;
    p = p * t;

    // expm1(2z) = (s - 1) + 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float sm1 = s - one;
    p = p * ts + ts;
    const float em1 = p * two + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,286
| 40.304688
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut4-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 4-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-4 polynomial
// with the leading 2 factored out (h2 reconstruction).
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut4_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-3: adding it rounds a float to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Selects the low 2 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0x3);
  // -log(2) split into a high part (6 trailing zero bits) and a low correction term.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Degree-4 polynomial coefficients:
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))  on [-log(2)/16, log(2)/16]
  const float c4 = 0x1.554F9Ap-2f;
  const float c3 = 0x1.557082p-1f;
  const float c2 = 0x1.000002p+0f;
  const float one = 1.0f;
  const float two = 2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 3) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**19, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 2 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 21;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    // Table entries are pre-decremented (as integers) by (k << 21), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 3) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Evaluate p, where exp(2t) - 1 ~= 2 * (t + t * p).
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t;

    // expm1(2z) = (s - 1) + 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float sm1 = s - one;
    p = p * ts + ts;
    const float em1 = p * two + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,366
| 40.284615
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut4-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 4-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-4 polynomial
// reconstructed as (s - 1) + (p * s) * t.
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut4_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-3: adding it rounds a float to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Selects the low 2 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0x3);
  // -log(2) split into a high part (6 trailing zero bits) and a low correction term.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Degree-4 polynomial coefficients:
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))  on [-log(2)/16, log(2)/16]
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = 0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float two = 2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 3) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**19, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 2 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 21;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    // Table entries are pre-decremented (as integers) by (k << 21), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 3) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Horner evaluation of p, where exp(2t) - 1 ~= t * p.
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + two;

    // expm1(2z) = s * t * p + (s - 1) = (s - 1) + (p * s) * t.
    const float ps = p * s;
    const float sm1 = s - one;
    const float em1 = t * ps + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,298
| 40.077519
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut4-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 4-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-4 polynomial
// reconstructed as (s - 1) + (t * s) * p.
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut4_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-3: adding it rounds a float to 3 fractional bits.
  const float magic_bias = 0x1.800000p+20f;
  // Selects the low 2 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0x3);
  // -log(2) split into a high part (6 trailing zero bits) and a low correction term.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Degree-4 polynomial coefficients:
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))  on [-log(2)/16, log(2)/16]
  const float c4 = 0x1.554F9Ap-1f;
  const float c3 = 0x1.557082p+0f;
  const float c2 = 0x1.000002p+1f;
  const float two = 2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 3) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**19, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 2 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 21;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_4[idx];
    // Table entries are pre-decremented (as integers) by (k << 21), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 3) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Horner evaluation of p, where exp(2t) - 1 ~= t * p.
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + two;

    // expm1(2z) = s * t * p + (s - 1) = (s - 1) + (t * s) * p.
    const float ts = t * s;
    const float sm1 = s - one;
    const float em1 = p * ts + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,298
| 40.077519
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut64-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
// Scalar tanhf via expm1(2z)/(expm1(2z) + 2) on z = |x|, using a 64-entry
// exp2 fraction table, Cody-Waite rr2 reduction, and a degree-3 polynomial.
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut64_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Positive saturation threshold: tanhf(z) == 1.0f for every z >= sat_cutoff ~= 9.010913.
  const float sat_cutoff = 0x1.205968p+3f;
  const float log2e = 0x1.715476p+0f;
  // Magic addend whose ulp is 2**-7: adding it rounds a float to 7 fractional bits.
  const float magic_bias = 0x1.800000p+16f;
  // Selects the low 6 bits used as the table index.
  const uint32_t index_mask = UINT32_C(0x3F);
  // -log(2) split into a high part (10 trailing zero bits) and a low correction
  // term; note this split's low term is positive.
  const float minus_ln2_hi = -0x1.62E800p-1f;
  const float minus_ln2_lo = 0x1.E8082Ep-16f;
  // Degree-3 polynomial coefficients:
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))  on [-log(2)/256, log(2)/256]
  const float c3 = 0x1.55555Ep-1f;
  const float c2 = 0x1.00001Ep+0f;
  const float one = 1.0f;
  const float two = 2.0f;

  while (n != 0) {
    const float x = *input++;

    // tanh is odd: compute y := expm1(2z) / (2 + expm1(2z)) on z = |x|,
    // then transplant the sign of x onto the result at the very end.
    float z = fabsf(x);

    // Clamp z at the saturation threshold so the reconstruction below yields
    // exactly +/-1 for saturated inputs; NaN propagates through pmin unchanged.
    z = math_pmin_f32(z, sat_cutoff);

    // n := round(z / log(2), 7) via the magic-bias trick. The trick requires
    // |z / log(2)| <= 2**15, which the clamp to ~9.011 guarantees with huge margin.
    float rn = z * log2e + magic_bias;

    // Build the scale s = 2**(2n) from the biased bits of rn:
    // the low 6 bits index the table of 2**frac(2n) values (each in [1, 2)),
    // and the bits above them, shifted into the exponent field, carry int(2n).
    const uint32_t bits = float_as_uint32(rn);
    const uint32_t exp_bits = bits << 17;
    const uint32_t idx = bits & index_mask;
    const uint32_t lut = xnn_table_exp2minus_k_over_64[idx];
    // Table entries are pre-decremented (as integers) by (k << 17), so a plain
    // integer addition of the exponent bits reconstructs s exactly.
    const float s = uint32_as_float(lut + exp_bits);

    // Undo the magic bias to recover n = round(z / log(2), 7) as a float.
    rn -= magic_bias;

    // Cody-Waite reduced argument t := z - n * log(2) (two-constant split).
    float t = rn * minus_ln2_hi + z;
    t = rn * minus_ln2_lo + t;

    // Evaluate p, where exp(2t) - 1 ~= 2 * (t + t * p).
    float p = c3 * t + c2;
    p = p * t;

    // expm1(2z) = (s - 1) + 2 * ((t * s) + (t * s) * p).
    const float ts = t * s;
    const float sm1 = s - one;
    p = p * ts + ts;
    const float em1 = p * two + sm1;

    // Denominator: exp(2z) + 1 = expm1(2z) + 2.
    const float ep1 = em1 + two;

    // y = expm1(2z) / (expm1(2z) + 2); then tanh(x) = copysign(y, x).
    float y = em1 / ep1;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,287
| 40.3125
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut8-p3h1ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut8_p3h1ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * c3)))
  // on [-log(2)/32, log(2)/32]
  const float vc3 = 0x1.555862p-1f;
  const float vc2 = 0x1.0007ACp+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the low 3 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-3 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = 2 * (t + t * (t * (c2 + t * c3)))
    //        = 2 * (t + t * p)
    float vp = vc3 * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * c3))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,274
| 40.210938
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut8-p4h2ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut8_p4h2ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-2f;
  const float vc3 = 0x1.555C20p-1f;
  const float vc2 = 0x1.000000p+0f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the low 3 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,367
| 40.292308
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = 0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the low 3 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,299
| 40.085271
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-lut8-p4h3ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__scalar_expm1plus_rr2_lut8_p4h3ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == exp2(-4)
  const float vmagic_bias = 0x1.800000p+19f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // Last 7 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const float vc4 = 0x1.5558ECp-1f;
  const float vc3 = 0x1.555C20p+0f;
  const float vc2 = 0x1.000000p+1f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for 0 <= z <= 9.010913 we have 0 <= int(2n) <= 26, and thus the adjusted exponent is not
    //    greater than 26.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t vb = float_as_uint32(vn);
    const uint32_t ve = vb << 20;

    // Use the low 3 bits of n, as integer, as an index for table lookup of l := 2**frac(2n).
    const uint32_t vidx = vb & vindex_mask;
    const uint32_t vl = xnn_table_exp2minus_k_over_8[vidx];

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(vl + ve);

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 5,299
| 40.085271
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-p6h4ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr2_p6h4ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E420p-1f;
  const float vminus_ln2_lo = -0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-5f;
  const float vc5 = 0x1.12278Ep-3f;
  const float vc4 = 0x1.555716p-2f;
  const float vc3 = 0x1.5554B0p-1f;
  const float vc2 = 0x1.FFFFFEp-1f;
  const float vone = 1.0f;
  const float vtwo = 2.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))))
    //        = 2 * (t + t * p)
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    vp = vp * vts + vts;
    const float vemo = vp * vtwo + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,509
| 38.217391
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-p6h5ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr2_p6h5ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E420p-1f;
  const float vminus_ln2_lo = -0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = 0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = 0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const float vps = vp * vs;
    const float vsmo = vs - vone;
    const float vemo = vt * vps + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,441
| 37.964912
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-scalar-expm1plus-rr2-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1plus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__scalar_expm1plus_rr2_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // The smallest z for which tanhf(z) is saturated at 1.0f.
  const float vsat_cutoff = 0x1.205968p+3f;
  const float vlog2e = 0x1.715476p+0f;
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const float vmagic_bias = 0x1.8000FEp+22f;
  // Last 4 bits are zeroes
  const float vminus_ln2_hi = -0x1.62E420p-1f;
  const float vminus_ln2_lo = -0x1.FDF474p-22f;
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const float vc6 = 0x1.6B7338p-4f;
  const float vc5 = 0x1.12278Ep-2f;
  const float vc4 = 0x1.555716p-1f;
  const float vc3 = 0x1.5554B0p+0f;
  const float vc2 = 0x1.FFFFFEp+0f;
  const float vtwo = 2.0f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    const float vx = *input++;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(2z) / (2 + expm1(2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    float vz = fabsf(vx);

    // The function saturates at +1 for large positive inputs: tanhf(z) == 1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == 1.0f. NaN inputs are passed unchanged.
    vz = math_pmin_f32(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vz * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**(2n) for valid inputs, i.e.
    // 0 <= z <= 9.010913, and 0 <= n <= 13 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vtwo;

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const float vts = vt * vs;
    const float vsmo = vs - vone;
    const float vemo = vp * vts + vsmo;

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const float vepo = vemo + vtwo;

    // Reconstruct y = expm1(2z) / (expm1(2z) + 2)
    float vy = vemo / vepo;

    // Reconstruct tanh(x) = copysign(y, x)
    vy = copysignf(vy, vx);

    *output++ = vy;
  }
}
| 4,441
| 37.964912
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr1-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanhf(x) elementwise with SSE2, via tanh(x) = expm1(2z) / (expm1(2z) + 2)
// where z = -|x|, restoring the sign at the end. expm1 uses round-to-1/16 range
// reduction (rr1), an 8-entry exp2 lookup table (lut8), a degree-4 polynomial with
// the h3ps reconstruction, and a true division (div) for the final fraction.
//
// n      - number of BYTES to process; must be a multiple of sizeof(__m128) (16),
//          i.e. the arrays hold n / sizeof(float) elements.
// input  - input array; must be 16-byte aligned (read with _mm_load_ps).
// output - output array; must be 16-byte aligned (written with _mm_store_ps).
//
// NOTE(review): relies on the file-scope table xnn_table_exp2minus_k_over_8
// declared earlier in this file.
void xnn_math_f32_tanh__sse2_expm1minus_rr1_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);

  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const __m128i vindex_mask = _mm_set1_epi32(0x7);
  const __m128 vminus_ln2 = _mm_set1_ps(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.555C20p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.000000p+1f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);

  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);

    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);

    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = _mm_max_ps(vsat_cutoff, vz);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);

    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    #if XNN_ARCH_X86_64
      __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      vidx = _mm_unpackhi_epi64(vidx, vidx);
      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #else
      const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #endif
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := z - n * log(2).
    const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);

    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    // Subtracting vminus_two (== -2.0f) adds the leading 2 of the polynomial.
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const __m128 vps = _mm_mul_ps(vp, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vt, vps), vsmo);

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const __m128 vepo = _mm_sub_ps(vemo, vminus_two);

    // Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
    __m128 vy = _mm_div_ps(vemo, vepo);

    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);

    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 7,425
| 45.4125
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr1-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise with SSE2, via tanh(x) = expm1(2z) / (expm1(2z) + 2)
// where z = -|x|, restoring the sign at the end. expm1 uses round-to-half range
// reduction (rr1), no lookup table, a degree-6 polynomial with the h5ts
// reconstruction, and a true division (div) for the final fraction.
//
// n      - number of BYTES to process; must be a multiple of sizeof(__m128) (16),
//          i.e. the arrays hold n / sizeof(float) elements.
// input  - input array; must be 16-byte aligned (read with _mm_load_ps).
// output - output array; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr1_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);

  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp+22f);
  const __m128 vminus_ln2 = _mm_set1_ps(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const __m128 vc6 = _mm_set1_ps(0x1.6B7338p-4f);
  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-2f);
  const __m128 vc4 = _mm_set1_ps(0x1.555716p-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp+0f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);

  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);

    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);

    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = _mm_max_ps(vsat_cutoff, vz);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 1 fractional bit,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // -9.010913 <= z <= 0, and -13 <= n <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := z - n * log(2).
    const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    // Subtracting vminus_two (== -2.0f) adds the leading 2 of the polynomial.
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const __m128 vts = _mm_mul_ps(vt, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const __m128 vepo = _mm_sub_ps(vemo, vminus_two);

    // Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
    __m128 vy = _mm_div_ps(vemo, vepo);

    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);

    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 5,050
| 40.065041
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr1-p6h5ts-nr1.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise with SSE2, via tanh(x) = expm1(2z) / (expm1(2z) + 2)
// where z = -|x|, restoring the sign at the end. Same reduction and degree-6
// polynomial as the *_div variant, but the division is replaced by _mm_rcp_ps
// followed by one Newton-Raphson iteration (nr1), and saturation is handled with
// a select mask instead of input clamping.
//
// n      - number of BYTES to process; must be a multiple of sizeof(__m128) (16),
//          i.e. the arrays hold n / sizeof(float) elements.
// input  - input array; must be 16-byte aligned (read with _mm_load_ps).
// output - output array; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr1_p6h5ts_nr1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);

  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp+22f);
  const __m128 vminus_ln2 = _mm_set1_ps(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const __m128 vc6 = _mm_set1_ps(0x1.6B7338p-4f);
  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-2f);
  const __m128 vc4 = _mm_set1_ps(0x1.555716p-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp+0f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);

  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);

    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);

    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 1 fractional bit,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // -9.010913 <= z <= 0, and -13 <= n <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := z - n * log(2).
    const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    // Subtracting vminus_two (== -2.0f) adds the leading 2 of the polynomial.
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const __m128 vts = _mm_mul_ps(vt, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2.
    // Note the sign trick: vepo = -2 - vemo = -(expm1(2z) + 2), i.e. the NEGATED denominator;
    // the Newton-Raphson step below flips the sign back (r' = r * (r*d - 2) = -r * (2 - r*d)),
    // so after one iteration vrepo approximates +1 / (expm1(2z) + 2).
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);

    // Use Newton-Raphson method (1 iteration) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));

    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);

    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));

    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);

    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 5,558
| 41.435115
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr1-p6h5ts-nr2.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanhf(x) elementwise with SSE2, via tanh(x) = expm1(2z) / (expm1(2z) + 2)
// where z = -|x|, restoring the sign at the end. Identical to the *_nr1 variant
// except that TWO Newton-Raphson iterations (nr2) refine the _mm_rcp_ps estimate
// of the reciprocal denominator, for higher accuracy.
//
// n      - number of BYTES to process; must be a multiple of sizeof(__m128) (16),
//          i.e. the arrays hold n / sizeof(float) elements.
// input  - input array; must be 16-byte aligned (read with _mm_load_ps).
// output - output array; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr1_p6h5ts_nr2(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);

  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp+22f);
  const __m128 vminus_ln2 = _mm_set1_ps(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const __m128 vc6 = _mm_set1_ps(0x1.6B7338p-4f);
  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-2f);
  const __m128 vc4 = _mm_set1_ps(0x1.555716p-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp+0f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);

  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);

    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);

    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 1 fractional bit,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // -9.010913 <= z <= 0, and -13 <= n <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := z - n * log(2).
    const __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2), vz);

    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    // Subtracting vminus_two (== -2.0f) adds the leading 2 of the polynomial.
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const __m128 vts = _mm_mul_ps(vt, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2.
    // Note the sign trick: vepo = -2 - vemo = -(expm1(2z) + 2), i.e. the NEGATED denominator;
    // the first NR step below flips the sign back (r' = r * (r*d - 2)), producing a positive
    // reciprocal estimate, and the second NR step (r'' = r' * (r'*d + 2)) refines it further.
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);

    // Use Newton-Raphson method (2 iterations) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
    vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));

    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);

    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));

    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);

    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 5,639
| 41.727273
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h2ts-nr1.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanhf(x) elementwise with SSE2, via tanh(x) = expm1(2z) / (expm1(2z) + 2)
// where z = -|x|, restoring the sign at the end. expm1 uses two-constant
// Cody-Waite range reduction (rr2), an 8-entry exp2 lookup table (lut8), a
// degree-4 polynomial with the h2ts reconstruction, and _mm_rcp_ps with one
// Newton-Raphson iteration (nr1) in place of a division.
//
// n      - number of BYTES to process; must be a multiple of sizeof(__m128) (16),
//          i.e. the arrays hold n / sizeof(float) elements.
// input  - input array; must be 16-byte aligned (read with _mm_load_ps).
// output - output array; must be 16-byte aligned (written with _mm_store_ps).
//
// NOTE(review): relies on the file-scope table xnn_table_exp2minus_k_over_8
// declared earlier in this file.
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h2ts_nr1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);

  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const __m128i vindex_mask = _mm_set1_epi32(0x7);
  // Last 7 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-2f);
  const __m128 vc3 = _mm_set1_ps(0x1.555C20p-1f);
  const __m128 vc2 = _mm_set1_ps(0x1.000000p+0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);

  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);

    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);

    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);

    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
    // then subtracing the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);

    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    #if XNN_ARCH_X86_64
      __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      vidx = _mm_unpackhi_epi64(vidx, vidx);
      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #else
      const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #endif
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));

    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
    //        = 2 * (t + t * p)
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
    //               = s * (2 * (t + t * p) + 1) - 1
    //               = (s - 1) + 2 * ((t * s) + (t * s) * p)
    // (the multiplication by 2 is expressed as subtracting a product with vminus_two).
    const __m128 vts = _mm_mul_ps(vt, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    vp = _mm_add_ps(_mm_mul_ps(vp, vts), vts);
    const __m128 vemo = _mm_sub_ps(vsmo, _mm_mul_ps(vp, vminus_two));

    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2.
    // Note the sign trick: vepo = -2 - vemo = -(expm1(2z) + 2), i.e. the NEGATED denominator;
    // the NR step below flips the sign back, so vrepo approximates +1 / (expm1(2z) + 2).
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);

    // Use Newton-Raphson method (1 iteration) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));

    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);

    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));

    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);

    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 8,270
| 46.809249
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h2ts-nr2.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h2ts_nr2(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(__m128) == 0);
// Mask for the sign bit.
const __m128 vsign_mask = _mm_set1_ps(-0.0f);
// The largest z for which tanhf(z) is saturated at -1.0f.
const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
// Large number such that ulp(magic bias) == exp2(-4)
const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
// Mask for the lowest 3 bits
const __m128i vindex_mask = _mm_set1_epi32(0x7);
// Last 7 bits are zeroes
const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
// Coefficients of polynomial approximation
// exp(2t) - 1 ~ 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
// on [-log(2)/32, log(2)/32]
const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-2f);
const __m128 vc3 = _mm_set1_ps(0x1.555C20p-1f);
const __m128 vc2 = _mm_set1_ps(0x1.000000p+0f);
const __m128 vminus_one = _mm_set1_ps(-1.0f);
const __m128 vminus_two = _mm_set1_ps(-2.0f);
for (; n != 0; n -= sizeof(__m128)) {
const __m128 vx = _mm_load_ps(input);
input += 4;
// General structure of the algorithm:
//
// / expm1(2x) / (2 + expm1(2x)) if x <= 0
// f(x) :=
// \ -f(-x) if x >= 0
//
// First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
__m128 vz = _mm_or_ps(vx, vsign_mask);
// Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
// The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
// To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
// with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
// Compute reduced argument n := round(z / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
// then subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
// n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 3:11 into 23:31 (position of floating-point exponent).
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
// Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
// Adjust exponent of the value l fetched from the table to get the final s value.
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
// Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := z - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = 2 * (t + t * (t * (c2 + t * (c3 + t * c4))))
// = 2 * (t + t * p)
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
// Reconstruct the exp(2z) - 1 value:
// exp(2z) - 1 = s * (2 * (t + t * (t * (c2 + t * (c3 + t * c4)))) + 1) - 1
// = s * (2 * (t + t * p) + 1) - 1
// = (s - 1) + 2 * ((t * s) + (t * s) * p)
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
vp = _mm_add_ps(_mm_mul_ps(vp, vts), vts);
const __m128 vemo = _mm_sub_ps(vsmo, _mm_mul_ps(vp, vminus_two));
// Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
// Use Newton-Raphson method (2 iterations) to compute reciprocal of the denominator.
// Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
// Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
__m128 vy = _mm_mul_ps(vemo, vrepo);
// Saturate tanh(z) at -1 for large inputs.
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
// Reconstruct tanh(x):
//
// / tanh(z) if x <= 0
// tanh(x) =
// \ -tanh(z) if x >= 0
vy = _mm_xor_ps(vy, vinvsignx);
_mm_store_ps(output, vy);
output += 4;
}
}
| 8,351
| 47
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h3ps-nr1.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanhf elementwise with SSE2, using the expm1-minus formulation:
//   tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -|x|, sign restored at the end.
// Variant: rr2 (two-constant Cody-Waite reduction), 8-entry exp2 LUT, degree-4
// polynomial, 1 Newton-Raphson step for the reciprocal of the denominator.
//
//   n      - number of bytes to process; must be a multiple of sizeof(__m128) (16).
//   input  - input floats; must be 16-byte aligned (read with _mm_load_ps).
//   output - output floats; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h3ps_nr1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);
  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const __m128i vindex_mask = _mm_set1_epi32(0x7);
  // Last 7 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.555C20p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.000000p+1f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);
  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    #if XNN_ARCH_X86_64
      __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      vidx = _mm_unpackhi_epi64(vidx, vidx);
      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #else
      const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #endif
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const __m128 vps = _mm_mul_ps(vp, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vt, vps), vsmo);
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);
    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);
    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 8,186
| 46.598837
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h3ps-nr2.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanhf elementwise with SSE2, using the expm1-minus formulation:
//   tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -|x|, sign restored at the end.
// Variant: rr2 (two-constant Cody-Waite reduction), 8-entry exp2 LUT, degree-4
// polynomial, 2 Newton-Raphson steps for the reciprocal of the denominator
// (higher accuracy than the nr1 variant at the cost of one extra mul/add pair).
//
//   n      - number of bytes to process; must be a multiple of sizeof(__m128) (16).
//   input  - input floats; must be 16-byte aligned (read with _mm_load_ps).
//   output - output floats; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h3ps_nr2(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);
  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const __m128i vindex_mask = _mm_set1_epi32(0x7);
  // Last 7 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.555C20p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.000000p+1f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);
  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    #if XNN_ARCH_X86_64
      __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      vidx = _mm_unpackhi_epi64(vidx, vidx);
      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #else
      const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #endif
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const __m128 vps = _mm_mul_ps(vp, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vt, vps), vsmo);
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
    // Use Newton-Raphson method (2 iterations) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
    vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);
    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);
    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 8,267
| 46.791908
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h3ts-nr1.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanhf elementwise with SSE2, using the expm1-minus formulation:
//   tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -|x|, sign restored at the end.
// Variant: rr2 (two-constant Cody-Waite reduction), 8-entry exp2 LUT, degree-4
// polynomial, 1 Newton-Raphson step for the reciprocal of the denominator.
// "ts" reconstruction: forms (t * s) first and multiplies by the polynomial,
// whereas the "ps" variant forms (p * s) first — same contract, different
// rounding path.
//
//   n      - number of bytes to process; must be a multiple of sizeof(__m128) (16).
//   input  - input floats; must be 16-byte aligned (read with _mm_load_ps).
//   output - output floats; must be 16-byte aligned (written with _mm_store_ps).
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h3ts_nr1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(__m128) == 0);
  // Mask for the sign bit.
  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const __m128i vindex_mask = _mm_set1_epi32(0x7);
  // Last 7 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-1f);
  const __m128 vc3 = _mm_set1_ps(0x1.555C20p+0f);
  const __m128 vc2 = _mm_set1_ps(0x1.000000p+1f);
  const __m128 vminus_two = _mm_set1_ps(-2.0f);
  const __m128 vminus_one = _mm_set1_ps(-1.0f);
  for (; n != 0; n -= sizeof(__m128)) {
    const __m128 vx = _mm_load_ps(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    __m128 vz = _mm_or_ps(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const __m128 vinvsignx = _mm_xor_ps(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
    // with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
    const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    #if XNN_ARCH_X86_64
      __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      vidx = _mm_unpackhi_epi64(vidx, vidx);
      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #else
      const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
      const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
      const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
      const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
      const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
      const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
      const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
    #endif
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const __m128 vts = _mm_mul_ps(vt, vs);
    const __m128 vsmo = _mm_add_ps(vs, vminus_one);
    const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
    // Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
    const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of the denominator.
    // Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
    // Thus the reciprocal of the denominator never overflows.
    __m128 vrepo = _mm_rcp_ps(vepo);
    vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
    // Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
    __m128 vy = _mm_mul_ps(vemo, vrepo);
    // Saturate tanh(z) at -1 for large inputs.
    vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = _mm_xor_ps(vy, vinvsignx);
    _mm_store_ps(output, vy);
    output += 4;
  }
}
| 8,186
| 46.598837
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-sse2-expm1minus-rr2-lut8-p4h3ts-nr2.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-sse-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__sse2_expm1minus_rr2_lut8_p4h3ts_nr2(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(__m128) == 0);
// Mask for the sign bit.
const __m128 vsign_mask = _mm_set1_ps(-0.0f);
// The largest z for which tanhf(z) is saturated at -1.0f.
const __m128 vsat_cutoff = _mm_set1_ps(-0x1.205968p+3f);
const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
// Large number such that ulp(magic bias) == exp2(-4)
const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+19f);
// Mask for the lowest 3 bits
const __m128i vindex_mask = _mm_set1_epi32(0x7);
// Last 7 bits are zeroes
const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
// Coefficients of polynomial approximation
// exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
// on [-log(2)/32, log(2)/32]
const __m128 vc4 = _mm_set1_ps(0x1.5558ECp-1f);
const __m128 vc3 = _mm_set1_ps(0x1.555C20p+0f);
const __m128 vc2 = _mm_set1_ps(0x1.000000p+1f);
const __m128 vminus_two = _mm_set1_ps(-2.0f);
const __m128 vminus_one = _mm_set1_ps(-1.0f);
for (; n != 0; n -= sizeof(__m128)) {
const __m128 vx = _mm_load_ps(input);
input += 4;
// General structure of the algorithm:
//
// / expm1(2x) / (2 + expm1(2x)) if x <= 0
// f(x) :=
// \ -f(-x) if x >= 0
//
// First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
__m128 vz = _mm_or_ps(vx, vsign_mask);
// Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
const __m128 vinvsignx = _mm_xor_ps(vx, vz);
// The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
// To guarantee this behaviour, we compute the saturation mask here, and later use it to replace computed outputs
// with the saturation value (-1). Note that for NaN inputs the saturation mask is inactive.
const __m128 vm = _mm_cmple_ps(vz, vsat_cutoff);
// Compute reduced argument n := round(z / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
// then subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
// n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 3:11 into 23:31 (position of floating-point exponent).
const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 20);
// Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
#if XNN_ARCH_X86_64
__m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
vidx = _mm_unpackhi_epi64(vidx, vidx);
const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_lo]);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_lo >> 32)]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) vidx_hi]);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[(uint32_t) (vidx_hi >> 32)]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#else
const __m128i vidx = _mm_and_si128(_mm_castps_si128(vn), vindex_mask);
const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
const __m128i vl0 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx0]);
const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
const __m128i vl1 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx1]);
const __m128i vl_lo = _mm_unpacklo_epi32(vl0, vl1);
const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
const __m128i vl2 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx2]);
const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
const __m128i vl3 = _mm_cvtsi32_si128((int) xnn_table_exp2minus_k_over_8[vidx3]);
const __m128i vl_hi = _mm_unpacklo_epi32(vl2, vl3);
#endif
const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
// Adjust exponent of the value l fetched from the table to get the final s value.
const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
// Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := z - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
// = t * p
__m128 vp = _mm_add_ps(_mm_mul_ps(vc4, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_sub_ps(_mm_mul_ps(vp, vt), vminus_two);
// Reconstruct the exp(2z) - 1 value:
// exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
// = s * t * p + (s - 1)
// = (s - 1) + (t * s) * p
const __m128 vts = _mm_mul_ps(vt, vs);
const __m128 vsmo = _mm_add_ps(vs, vminus_one);
const __m128 vemo = _mm_add_ps(_mm_mul_ps(vp, vts), vsmo);
// Denominator of the tanh fraction: exp(2z) + 1 = expm1(2z) + 2
const __m128 vepo = _mm_sub_ps(vminus_two, vemo);
// Use Newton-Raphson method (2 iterations) to compute reciprocal of the denominator.
// Note: 2 < exp(2z) + 1 <= 3, because z <= 0 and 0 < exp(2z) <= 1.
// Thus the reciprocal of the denominator never overflows.
__m128 vrepo = _mm_rcp_ps(vepo);
vrepo = _mm_mul_ps(vrepo, _mm_add_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
vrepo = _mm_mul_ps(vrepo, _mm_sub_ps(_mm_mul_ps(vrepo, vepo), vminus_two));
// Reconstruct tanh(z) := expm1(2z) / (2 + expm1(2z))
__m128 vy = _mm_mul_ps(vemo, vrepo);
// Saturate tanh(z) at -1 for large inputs.
vy = _mm_or_ps(_mm_andnot_ps(vm, vy), _mm_and_ps(vminus_one, vm));
// Reconstruct tanh(x):
//
// / tanh(z) if x <= 0
// tanh(x) =
// \ -tanh(z) if x >= 0
vy = _mm_xor_ps(vy, vinvsignx);
_mm_store_ps(output, vy);
output += 4;
}
}
| 8,267
| 46.791908
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasm-expm1minus-rr1-lut8-p4h3ps-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__wasm_expm1minus_rr1_lut8_p4h3ps_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // tanhf(-z) saturates at -1.0f for every z at or above this cutoff.
  const float sat_cutoff = 0x1.205968p+3f;
  // -log2(e): converts the argument into a base-2 exponent.
  const float neg_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp(bias) == exp2(-4), so adding it keeps 4 fractional bits.
  const float round_bias = 0x1.800000p+19f;
  // Extracts the 3 low bits that index the exp2 lookup table.
  const uint32_t idx_mask = UINT32_C(0x7);
  const float ln2 = 0x1.62E430p-1f;
  // Degree-4 minimax polynomial on [-log(2)/32, log(2)/32]:
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
  const float c4 = 0x1.5558ECp-1f;
  const float c3 = -0x1.555C20p+0f;
  const float c2 = 0x1.000000p+1f;
  const float neg_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // Evaluate on z = |x|:
    //   tanh(x) = sign(x) * expm1(-2z) / (2 + expm1(-2z))
    float z = fabsf(x);

    // Clamp z at the cutoff: this implementation yields exactly -1.0f there,
    // which guarantees saturation. NaN passes through the min unchanged.
    z = __builtin_wasm_min_f32(z, sat_cutoff);

    // k := round(-z / log(2), 4) via the magic-bias trick. The trick is valid
    // for |z / log(2)| <= 2**18, which the clamped z easily satisfies, and the
    // add/subtract pair cannot overflow in that range.
    float k = z * neg_log2e + round_bias;

    // Build the scale s := 2**(2k) in two pieces: the 3 low bits of k select
    // 2**frac(2k) from the table (values in [1.0, 2.0), unbiased exponent 0),
    // and int(2k) is folded into the exponent field by integer addition.
    // The result stays normalized, since -13 <= int(k) <= 0 here.
    const uint32_t kbits = float_as_uint32(k);
    // Move bits 3:11 into the exponent position (bits 23:31).
    const uint32_t exp_bits = kbits << 20;
    const uint32_t tab_idx = kbits & idx_mask;
    const uint32_t tab_val = xnn_table_exp2minus_k_over_8[tab_idx];
    const float s = uint32_as_float(tab_val + exp_bits);

    // Remove the bias to recover k = round(-z / log(2), 4) as a float.
    k -= round_bias;

    // Reduced argument t := z + k * log(2); note -t = -z - k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p, where exp(-2t) - 1 == t * p.
    float p = c4 * t + c3;
    p = p * t + c2;
    p = p * t + neg_two;

    // exp(-2z) - 1 = s * (t * p + 1) - 1 = (s - 1) + (p * s) * t
    const float ps = p * s;
    const float smo = s - one;
    const float emo = t * ps + smo;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2
    const float epo = emo - neg_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 5,165
| 40.328
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasm-expm1minus-rr1-p6h5ts-div.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-scalar-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_tanh__wasm_expm1minus_rr1_p6h5ts_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // tanhf(-z) saturates at -1.0f for every z at or above this cutoff.
  const float sat_cutoff = 0x1.205968p+3f;
  const float neg_log2e = -0x1.715476p+0f;
  // Rounding bias: ulp == 0.5 and value === 63.5 mod 2**21, so it both rounds
  // to 1 fractional bit and pre-adds the floating-point exponent bias (127).
  const float round_bias = 0x1.8000FEp+22f;
  const float ln2 = 0x1.62E430p-1f;
  // Degree-6 minimax polynomial on [-log(2)/4, log(2)/4]:
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  const float c6 = 0x1.6B7338p-4f;
  const float c5 = -0x1.12278Ep-2f;
  const float c4 = 0x1.555716p-1f;
  const float c3 = -0x1.5554B0p+0f;
  const float c2 = 0x1.FFFFFEp+0f;
  const float neg_two = -2.0f;
  const float one = 1.0f;

  while (n != 0) {
    const float x = *input++;

    // Evaluate on z = |x|:
    //   tanh(x) = sign(x) * expm1(-2z) / (2 + expm1(-2z))
    float z = fabsf(x);

    // Clamp z at the cutoff: this implementation yields exactly -1.0f there,
    // which guarantees saturation. NaN passes through the min unchanged.
    z = __builtin_wasm_min_f32(z, sat_cutoff);

    // k := round(-z / log(2), 1) via the magic-bias trick, valid here because
    // the clamped |z| is far below the 2**20 * log(2) bound, so the
    // add/subtract pair cannot overflow.
    float k = z * neg_log2e + round_bias;

    // Scale s == 2**(2k): since the bias already contains the exponent bias,
    // shifting the low mantissa bits into the exponent field yields s
    // directly. No underflow occurs for 0 <= z <= 9.010913 (-13 <= k <= 0).
    const float s = uint32_as_float(float_as_uint32(k) << 23);

    // Remove the bias to recover k = round(-z / log(2), 1) as a float.
    k -= round_bias;

    // Reduced argument t := z + k * log(2); note -t = -z - k * log(2).
    const float t = k * ln2 + z;

    // Horner evaluation of p, where exp(-2t) - 1 == t * p.
    float p = c6 * t + c5;
    p = p * t + c4;
    p = p * t + c3;
    p = p * t + c2;
    p = p * t + neg_two;

    // exp(-2z) - 1 = s * (t * p + 1) - 1 = (s - 1) + (t * s) * p
    const float ts = t * s;
    const float smo = s - one;
    const float emo = p * ts + smo;

    // Denominator: exp(-2z) + 1 = expm1(-2z) + 2
    const float epo = emo - neg_two;

    // y = expm1(-2z) / (expm1(-2z) + 2), then restore the sign of x.
    float y = emo / epo;
    y = copysignf(y, x);

    *output++ = y;
    n -= sizeof(float);
  }
}
| 4,308
| 38.172727
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-lut8-p4h3ps-div-abs-min.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_lut8_p4h3ps_div_abs_min(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(v128_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const v128_t vsat_cutoff = wasm_f32x4_const_splat(0x1.205968p+3f);
const v128_t vminus_log2e = wasm_f32x4_const_splat(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == exp2(-4)
const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p+19f);
// Mask for the lowest 3 bits
const v128_t vindex_mask = wasm_u32x4_const_splat(UINT32_C(0x7));
const v128_t vln2 = wasm_f32x4_const_splat(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
// on [-log(2)/32, log(2)/32]
const v128_t vc4 = wasm_f32x4_const_splat(0x1.5558ECp-1f);
const v128_t vc3 = wasm_f32x4_const_splat(-0x1.555C20p+0f);
const v128_t vc2 = wasm_f32x4_const_splat(0x1.000000p+1f);
const v128_t vminus_two = wasm_f32x4_const_splat(-2.0f);
const v128_t vone = wasm_f32x4_const_splat(1.0f);
// Mask for the sign bit.
const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
for (; n != 0; n -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
v128_t vz = wasm_f32x4_abs(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(-sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = wasm_f32x4_min(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
// then subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
// n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 3:11 into 23:31 (position of floating-point exponent).
const v128_t ve = wasm_i32x4_shl(vn, 20);
// Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
// Adjust exponent of the value l fetched from the table to get the final s value.
const v128_t vs = wasm_i32x4_add(vl, ve);
// Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
vn = wasm_f32x4_sub(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
// Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
// = t * p
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
// = s * t * p + (s - 1)
// = (s - 1) + (p * s) * t
const v128_t vps = wasm_f32x4_mul(vp, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vt, vps), vsmo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
}
| 6,362
| 47.204545
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-lut8-p4h3ps-div-abs-pmin.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_lut8_p4h3ps_div_abs_pmin(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(v128_t) == 0);
// The smallest z for which tanhf(-z) is saturated at -1.0f.
const v128_t vsat_cutoff = wasm_f32x4_const_splat(0x1.205968p+3f);
const v128_t vminus_log2e = wasm_f32x4_const_splat(-0x1.715476p+0f);
// Large number such that ulp(magic bias) == exp2(-4)
const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p+19f);
// Mask for the lowest 3 bits
const v128_t vindex_mask = wasm_u32x4_const_splat(UINT32_C(0x7));
const v128_t vln2 = wasm_f32x4_const_splat(0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * c4)))
// on [-log(2)/32, log(2)/32]
const v128_t vc4 = wasm_f32x4_const_splat(0x1.5558ECp-1f);
const v128_t vc3 = wasm_f32x4_const_splat(-0x1.555C20p+0f);
const v128_t vc2 = wasm_f32x4_const_splat(0x1.000000p+1f);
const v128_t vminus_two = wasm_f32x4_const_splat(-2.0f);
const v128_t vone = wasm_f32x4_const_splat(1.0f);
// Mask for the sign bit.
const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
for (; n != 0; n -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
// General structure of the algorithm:
//
// / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
// f(x) :=
// \ -f(-x) if x <= 0
//
// First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
// then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
v128_t vz = wasm_f32x4_abs(vx);
// The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(-sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = wasm_f32x4_pmin(vz, vsat_cutoff);
// Compute reduced argument n := round(-z / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
// then subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|-z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outsize [0, 9.010913]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. 0 <= z <= 9.010913. As
// n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for 0 <= z <= 9.010913 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 3:11 into 23:31 (position of floating-point exponent).
const v128_t ve = wasm_i32x4_shl(vn, 20);
// Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
// Adjust exponent of the value l fetched from the table to get the final s value.
const v128_t vs = wasm_i32x4_add(vl, ve);
// Subtract the large number back to get final n := round(-z / log(2), 4) as a floating-point number.
vn = wasm_f32x4_sub(vn, vmagic_bias);
// Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
// Compute degree-4 polynomial approximation for exp(-2t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (-2 + t * (c2 + t * (c3 + t * c4)))
// = t * p
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
// Reconstruct the exp(-2z) - 1 value:
// exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
// = s * t * p + (s - 1)
// = (s - 1) + (p * s) * t
const v128_t vps = wasm_f32x4_mul(vp, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vt, vps), vsmo);
// Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
v128_t vy = wasm_f32x4_div(vemo, vepo);
// Reconstruct tanh(x) = copysign(y, x)
vy = wasm_v128_bitselect(vx, vy, vsign_mask);
wasm_v128_store(output, vy);
output += 4;
}
}
| 6,364
| 47.219697
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-lut8-p4h3ps-div-nabs-max.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_lut8_p4h3ps_div_nabs_max(
size_t n,
const float* input,
float* output)
{
assert(n % sizeof(v128_t) == 0);
// Mask for the sign bit.
const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
// The largest z for which tanhf(z) is saturated at -1.0f.
const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.205968p+3f);
const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
// Large number such that ulp(magic bias) == exp2(-4)
const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p+19f);
// Mask for the lowest 3 bits
const v128_t vindex_mask = wasm_u32x4_const_splat(UINT32_C(0x7));
const v128_t vminus_ln2 = wasm_f32x4_const_splat(-0x1.62E430p-1f);
// Coefficients of polynomial approximation
// exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
// on [-log(2)/32, log(2)/32]
const v128_t vc4 = wasm_f32x4_const_splat(0x1.5558ECp-1f);
const v128_t vc3 = wasm_f32x4_const_splat(0x1.555C20p+0f);
const v128_t vc2 = wasm_f32x4_const_splat(0x1.000000p+1f);
const v128_t vtwo = wasm_f32x4_const_splat(2.0f);
const v128_t vone = wasm_f32x4_const_splat(1.0f);
for (; n != 0; n -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
// General structure of the algorithm:
//
// / expm1(2x) / (2 + expm1(2x)) if x <= 0
// f(x) :=
// \ -f(-x) if x >= 0
//
// First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
v128_t vz = wasm_v128_or(vx, vsign_mask);
// Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
const v128_t vinvsignx = wasm_v128_xor(vx, vz);
// The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
// To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
// tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
vz = wasm_f32x4_max(vz, vsat_cutoff);
// Compute reduced argument n := round(z / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits,
// then subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
// outside of [-9.010913, 9.010913] (i.e. z outsize [-9.010913, 0]) saturate tanhf(x).
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
// n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
// 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
// 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
// number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -13.
//
// Shift bits 3:11 into 23:31 (position of floating-point exponent).
const v128_t ve = wasm_i32x4_shl(vn, 20);
// Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
// Adjust exponent of the value l fetched from the table to get the final s value.
const v128_t vs = wasm_i32x4_add(vl, ve);
// Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
vn = wasm_f32x4_sub(vn, vmagic_bias);
// Compute reduced argument t := z - n * log(2).
const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
// Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
// = t * p
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
// Reconstruct the exp(2z) - 1 value:
// exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
// = s * t * p + (s - 1)
// = (s - 1) + (p * s) * t
const v128_t vps = wasm_f32x4_mul(vp, vs);
const v128_t vsmo = wasm_f32x4_sub(vs, vone);
const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vt, vps), vsmo);
// Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
v128_t vy = wasm_f32x4_div(vemo, vepo);
// Reconstruct tanh(x):
//
// / tanh(z) if x <= 0
// tanh(x) =
// \ -tanh(z) if x >= 0
vy = wasm_v128_xor(vy, vinvsignx);
wasm_v128_store(output, vy);
output += 4;
}
}
| 6,498
| 46.094203
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-lut8-p4h3ps-div-nabs-pmax.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes tanh(x) on a buffer of floats, 4 elements per WASM SIMD iteration,
// via the identity tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -abs(x).
// expm1 is reconstructed from an 8-entry exp2 lookup table and a degree-4
// polynomial, and the final ratio is produced by a vector division.
//
// n      - size of the input/output arrays in bytes; must be a multiple of sizeof(v128_t).
// input  - pointer to the input floats.
// output - pointer to the output floats (may alias input).
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_lut8_p4h3ps_div_nabs_pmax(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(v128_t) == 0);

  // Mask for the sign bit.
  const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.205968p+3f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p+19f);
  // Mask for the lowest 3 bits
  const v128_t vindex_mask = wasm_u32x4_const_splat(UINT32_C(0x7));
  const v128_t vminus_ln2 = wasm_f32x4_const_splat(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/32, log(2)/32]
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.5558ECp-1f);
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.555C20p+0f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.000000p+1f);
  const v128_t vtwo = wasm_f32x4_const_splat(2.0f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    v128_t vz = wasm_v128_or(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const v128_t vinvsignx = wasm_v128_xor(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = wasm_f32x4_pmax(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**18, i.e. |z| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**(2n) for valid inputs, i.e. -9.010913 <= z <= 0. As
    // n has 4 fractional bits, we split s == 2**(2n) = 2**int(2n) * 2**frac(2n). We create s in two steps:
    // 1. Fetch 2**frac(2n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their unbiased floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(2n) to its floating-point exponent. The result is always a normalized
    //    number, because for -9.010913 <= z <= 0 we have -13 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -13.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const v128_t ve = wasm_i32x4_shl(vn, 20);
    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_u64x2_extract_lane(vidx, 0);
    v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_lo));
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_lo >> 32)), vl, 1);
    const uint64_t vidx_hi = wasm_u64x2_extract_lane(vidx, 1);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) vidx_hi), vl, 2);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_8 + (uint32_t) (vidx_hi >> 32)), vl, 3);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const v128_t vs = wasm_i32x4_add(vl, ve);
    // Subtract the large number back to get final n := round(z / log(2), 4) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
    // Compute degree-4 polynomial approximation for exp(2t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * c4)))
    //        = t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc4, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * c4))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (p * s) * t
    const v128_t vps = wasm_f32x4_mul(vp, vs);
    const v128_t vsmo = wasm_f32x4_sub(vs, vone);
    const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vt, vps), vsmo);
    // Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
    const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
    v128_t vy = wasm_f32x4_div(vemo, vepo);
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = wasm_v128_xor(vy, vinvsignx);
    wasm_v128_store(output, vy);
    output += 4;
  }
}
| 6,500
| 46.108696
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-min.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanh(x) on a buffer of floats, 4 elements per WASM SIMD iteration,
// via the identity tanh(x) = sign(x) * (-expm1(-2z) / (2 + expm1(-2z))) with
// z = abs(x). expm1 is approximated with a degree-6 polynomial; the saturation
// clamp uses wasm_f32x4_min.
//
// n      - size of the input/output arrays in bytes; must be a multiple of sizeof(v128_t).
// input  - pointer to the input floats.
// output - pointer to the output floats (may alias input).
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_min(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(v128_t) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(0x1.205968p+3f);
  const v128_t vminus_log2e = wasm_f32x4_const_splat(-0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp+22f);
  const v128_t vln2 = wasm_f32x4_const_splat(0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6B7338p-4f);
  const v128_t vc5 = wasm_f32x4_const_splat(-0x1.12278Ep-2f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-1f);
  const v128_t vc3 = wasm_f32x4_const_splat(-0x1.5554B0p+0f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp+0f);
  const v128_t vminus_two = wasm_f32x4_const_splat(-2.0f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  // Mask for the sign bit.
  const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    v128_t vz = wasm_f32x4_abs(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(-sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = wasm_f32x4_min(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const v128_t vts = wasm_f32x4_mul(vt, vs);
    const v128_t vsmo = wasm_f32x4_sub(vs, vone);
    const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
    v128_t vy = wasm_f32x4_div(vemo, vepo);
    // Reconstruct tanh(x) = copysign(y, x)
    vy = wasm_v128_bitselect(vx, vy, vsign_mask);
    wasm_v128_store(output, vy);
    output += 4;
  }
}
| 5,023
| 43.460177
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-p6h5ts-div-abs-pmin.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-abs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanh(x) on a buffer of floats, 4 elements per WASM SIMD iteration,
// via the identity tanh(x) = sign(x) * (-expm1(-2z) / (2 + expm1(-2z))) with
// z = abs(x). expm1 is approximated with a degree-6 polynomial; the saturation
// clamp uses wasm_f32x4_pmin.
//
// n      - size of the input/output arrays in bytes; must be a multiple of sizeof(v128_t).
// input  - pointer to the input floats.
// output - pointer to the output floats (may alias input).
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_p6h5ts_div_abs_pmin(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(v128_t) == 0);

  // The smallest z for which tanhf(-z) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(0x1.205968p+3f);
  const v128_t vminus_log2e = wasm_f32x4_const_splat(-0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp+22f);
  const v128_t vln2 = wasm_f32x4_const_splat(0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(-2t) - 1 ~ t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6B7338p-4f);
  const v128_t vc5 = wasm_f32x4_const_splat(-0x1.12278Ep-2f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-1f);
  const v128_t vc3 = wasm_f32x4_const_splat(-0x1.5554B0p+0f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp+0f);
  const v128_t vminus_two = wasm_f32x4_const_splat(-2.0f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  // Mask for the sign bit.
  const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / -expm1(-2x) / (2 + expm1(-2x)) if x >= 0
    //   f(x) :=
    //           \ -f(-x) if x <= 0
    //
    // First we compute y := expm1(-2z) / (2 + expm1(-2z)) where z = abs(x),
    // then set its sign according to the sign of x: f(x) := sign(x) * abs(y).
    v128_t vz = wasm_f32x4_abs(vx);
    // The function saturates at -1 for large positive inputs: tanhf(-z) == -1.0f for z >= sat_cutoff ~= 9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(-sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = wasm_f32x4_pmin(vz, vsat_cutoff);
    // Compute reduced argument n := round(-z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|-z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [0, 9.010913]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // 0 <= z <= 9.010913, and -13 <= n <= 0 accordingly.
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(-z / log(2), 1) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2), vz);
    // Compute degree-6 polynomial approximation for exp(-2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vminus_two);
    // Reconstruct the exp(-2z) - 1 value:
    //   exp(-2z) - 1 = s * (t * (-2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //                = s * t * p + (s - 1)
    //                = (s - 1) + (t * s) * p
    const v128_t vts = wasm_f32x4_mul(vt, vs);
    const v128_t vsmo = wasm_f32x4_sub(vs, vone);
    const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
    // Reconstruct y = expm1(-2z) / (expm1(-2z) + 2)
    const v128_t vepo = wasm_f32x4_sub(vemo, vminus_two);
    v128_t vy = wasm_f32x4_div(vemo, vepo);
    // Reconstruct tanh(x) = copysign(y, x)
    vy = wasm_v128_bitselect(vx, vy, vsign_mask);
    wasm_v128_store(output, vy);
    output += 4;
  }
}
| 5,025
| 43.477876
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-max.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanh(x) on a buffer of floats, 4 elements per WASM SIMD iteration,
// via the identity tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -abs(x).
// expm1 is approximated with a degree-6 polynomial; the saturation clamp uses
// wasm_f32x4_max.
//
// n      - size of the input/output arrays in bytes; must be a multiple of sizeof(v128_t).
// input  - pointer to the input floats.
// output - pointer to the output floats (may alias input).
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_max(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(v128_t) == 0);

  // Mask for the sign bit.
  const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.205968p+3f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp+22f);
  const v128_t vminus_ln2 = wasm_f32x4_const_splat(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6B7338p-4f);
  const v128_t vc5 = wasm_f32x4_const_splat(0x1.12278Ep-2f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-1f);
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.5554B0p+0f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp+0f);
  const v128_t vtwo = wasm_f32x4_const_splat(2.0f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    v128_t vz = wasm_v128_or(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const v128_t vinvsignx = wasm_v128_xor(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = wasm_f32x4_max(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // -9.010913 <= z <= 0, and -13 <= n <= 0 accordingly.
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const v128_t vts = wasm_f32x4_mul(vt, vs);
    const v128_t vsmo = wasm_f32x4_sub(vs, vone);
    const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
    // Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
    const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
    v128_t vy = wasm_f32x4_div(vemo, vepo);
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = wasm_v128_xor(vy, vinvsignx);
    wasm_v128_store(output, vy);
    output += 4;
  }
}
| 5,157
| 42.344538
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/gen/f32-tanh-wasmsimd-expm1minus-rr1-p6h5ts-div-nabs-pmax.c
|
// Auto-generated file. Do not edit!
// Template: src/math/f32-tanh-wasmsimd-expm1minus-nabs.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Computes tanh(x) on a buffer of floats, 4 elements per WASM SIMD iteration,
// via the identity tanh(x) = expm1(2z) / (2 + expm1(2z)) with z = -abs(x).
// expm1 is approximated with a degree-6 polynomial; the saturation clamp uses
// wasm_f32x4_pmax.
//
// n      - size of the input/output arrays in bytes; must be a multiple of sizeof(v128_t).
// input  - pointer to the input floats.
// output - pointer to the output floats (may alias input).
void xnn_math_f32_tanh__wasmsimd_expm1minus_rr1_p6h5ts_div_nabs_pmax(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(v128_t) == 0);

  // Mask for the sign bit.
  const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
  // The largest z for which tanhf(z) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.205968p+3f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Large number such that ulp(magic bias) == 0.5 and magic bias === 63.5 mod 2**21.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp+22f);
  const v128_t vminus_ln2 = wasm_f32x4_const_splat(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(2t) - 1 ~ t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/4, log(2)/4]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6B7338p-4f);
  const v128_t vc5 = wasm_f32x4_const_splat(0x1.12278Ep-2f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-1f);
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.5554B0p+0f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp+0f);
  const v128_t vtwo = wasm_f32x4_const_splat(2.0f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    // General structure of the algorithm:
    //
    //           / expm1(2x) / (2 + expm1(2x)) if x <= 0
    //   f(x) :=
    //           \ -f(-x) if x >= 0
    //
    // First we compute f(z) := expm1(2z) / (2 + expm1(2z)) where z = -abs(x), then negate the result if x >= 0.
    v128_t vz = wasm_v128_or(vx, vsign_mask);
    // Inverted mask for the sign of input: 0x00000000 for negative x, 0x80000000 for positive x.
    const v128_t vinvsignx = wasm_v128_xor(vx, vz);
    // The function saturates at -1 for large negative inputs: tanhf(z) == -1.0f for z <= sat_cutoff ~= -9.010913.
    // To guarantee this behaviour, we clip input z at sat_cutoff, and leverage the fact that for our implementation
    // tanhf(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vz = wasm_f32x4_pmax(vz, vsat_cutoff);
    // Compute reduced argument n := round(z / log(2), 1).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 1 fractional bit,
    // then subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|z / log(2)| <= 2**21, i.e. |z| <= 0x1.62E43p+20 = 1453635.0), but that is acceptable, because inputs x
    // outside of [-9.010913, 9.010913] (i.e. z outside [-9.010913, 0]) saturate tanhf(x).
    // Additionally, we fuse addition of the floating-point exponent bias (127) into the magic bias.
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**(2n) for inputs which don't cause underflow, i.e.
    // -9.010913 <= z <= 0, and -13 <= n <= 0 accordingly.
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(z / log(2), 1) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2), vz);
    // Compute degree-6 polynomial approximation for exp(2t) - 1 on [-log(2)/4, log(2)/4].
    //   P(t) = t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vtwo);
    // Reconstruct the exp(2z) - 1 value:
    //   exp(2z) - 1 = s * (t * (2 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) + 1) - 1
    //               = s * t * p + (s - 1)
    //               = (s - 1) + (t * s) * p
    const v128_t vts = wasm_f32x4_mul(vt, vs);
    const v128_t vsmo = wasm_f32x4_sub(vs, vone);
    const v128_t vemo = wasm_f32x4_add(wasm_f32x4_mul(vp, vts), vsmo);
    // Reconstruct tanh(z) = expm1(2z) / (expm1(2z) + 2)
    const v128_t vepo = wasm_f32x4_add(vemo, vtwo);
    v128_t vy = wasm_f32x4_div(vemo, vepo);
    // Reconstruct tanh(x):
    //
    //             / tanh(z) if x <= 0
    //   tanh(x) =
    //             \ -tanh(z) if x >= 0
    vy = wasm_v128_xor(vy, vinvsignx);
    wasm_v128_store(output, vy);
    output += 4;
  }
}
| 5,159
| 42.361345
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/operators/batch-matrix-multiply-nc.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/params.h>
// Allocates and initializes a batch matrix-multiply operator descriptor.
// Shared by the datatype-specific public create entry points.
//
// flags         - operator creation flags, stored verbatim on the operator.
// params        - microkernel parameters copied into the operator (params_size bytes).
// gemm_config   - GEMM configuration supplying mr/nr/kr/sr and the GOI packing function.
// gemm_ukernels - the already-selected set (minmax or linear) of GEMM microkernels.
// operator_type - operator type tag, used for logging and later validation.
// batch_matrix_multiply_op_out - receives the new operator on success.
//
// Returns xnn_status_success, or an error status; on failure any partially
// created operator is deleted before returning.
enum xnn_status create_batch_matrix_multiply_nc(
  uint32_t flags,
  const void* params,
  size_t params_size,
  const struct xnn_gemm_config* gemm_config,
  const struct gemm_fused_ukernels* gemm_ukernels,
  enum xnn_operator_type operator_type,
  xnn_operator_t* batch_matrix_multiply_op_out)
{
  xnn_operator_t batch_matrix_multiply_op = NULL;
  enum xnn_status status = xnn_status_uninitialized;
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error(
      "failed to create %s operator: XNNPACK is not initialized", xnn_operator_type_to_string(operator_type));
    goto error;
  }
  status = xnn_status_out_of_memory;
  // SIMD-aligned, zero-initialized allocation so vector loads on the operator
  // state are safe and all unset fields start at 0/NULL.
  batch_matrix_multiply_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
  if (batch_matrix_multiply_op == NULL) {
    xnn_log_error(
      "failed to allocate %zu bytes for %s operator descriptor",
      sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
    goto error;
  }
  memcpy(&batch_matrix_multiply_op->params, params, params_size);
  batch_matrix_multiply_op->type = operator_type;
  batch_matrix_multiply_op->flags = flags;
  const size_t mr = gemm_config->mr;
  batch_matrix_multiply_op->ukernel.type = xnn_microkernel_type_gemm;
  // kr/sr are stored as log2 in the config; expand them to actual values here.
  batch_matrix_multiply_op->ukernel.gemm = (struct xnn_ukernel_gemm) {
    .mr = mr,
    .nr = gemm_config->nr,
    .kr = UINT32_C(1) << gemm_config->log2_kr,
    .sr = UINT32_C(1) << gemm_config->log2_sr,
  };
  assert(XNN_MAX_MR >= mr);
  // Copy one GEMM microkernel per supported MR tile size.
  for (size_t i = 0; i < mr; i++) {
    batch_matrix_multiply_op->ukernel.gemm.gemm_cases[i] = gemm_ukernels->gemm[i];
  }
  batch_matrix_multiply_op->ukernel.gemm.packw_gemm_goi = gemm_config->pack_gemm_goi;
  batch_matrix_multiply_op->state = xnn_run_state_invalid;
  *batch_matrix_multiply_op_out = batch_matrix_multiply_op;
  return xnn_status_success;
error:
  xnn_delete_operator(batch_matrix_multiply_op);
  return status;
}
// Creates an F32 batch matrix-multiply operator.
//
// flags - operator creation flags.
// batch_matrix_multiply_op_out - receives the new operator on success.
//
// Returns xnn_status_success, xnn_status_unsupported_hardware when no F32 GEMM
// configuration exists for this CPU, or the status from the shared creator.
enum xnn_status xnn_create_batch_matrix_multiply_nc_f32(
  uint32_t flags,
  xnn_operator_t* batch_matrix_multiply_op_out)
{
  // Look up the F32 GEMM microkernel configuration for the current hardware.
  const struct xnn_gemm_config* gemm_config = xnn_init_f32_gemm_config();
  if (gemm_config == NULL) {
    xnn_log_error("failed to create %s operator: unsupported hardware configuration",
                  xnn_operator_type_to_string(xnn_operator_type_batch_matrix_multiply_nc_f32));
    return xnn_status_unsupported_hardware;
  }

  // Prefer linear (clamp-free) microkernels when available; otherwise use the
  // minmax variants with clamping effectively disabled via infinite bounds.
  const bool has_linear =
      gemm_config->linear.gemm[gemm_config->mr - 1].function[XNN_UARCH_DEFAULT] != NULL;
  const struct gemm_fused_ukernels* selected_ukernels =
      has_linear ? &gemm_config->linear : &gemm_config->minmax;

  union xnn_f32_minmax_params minmax_params;
  if XNN_LIKELY(gemm_config->init.f32 != NULL) {
    gemm_config->init.f32(&minmax_params, -INFINITY, INFINITY);
  }

  return create_batch_matrix_multiply_nc(
    flags,
    &minmax_params, sizeof(minmax_params),
    gemm_config, selected_ukernels,
    xnn_operator_type_batch_matrix_multiply_nc_f32,
    batch_matrix_multiply_op_out);
}
// Validates dimensions and configures the compute plan for a batch matrix
// multiply: [batch_size, m, k] x [batch_size, k, n] -> [batch_size, m, n].
// Fills in the weight-packing (compute[0]) and GEMM (compute[1]) contexts and
// reports the workspace needed for the packed second input.
//
// Fix: the m/k/n validation messages previously said "failed to create ...",
// which is misleading in a reshape entry point; they now say "failed to
// reshape ..." consistently with this function's role.
//
// Returns xnn_status_success (state set to needs_setup, or skip when
// batch_size == 0), or an error status on invalid arguments.
static enum xnn_status reshape_batch_matrix_multiply_nc(
  xnn_operator_t batch_matrix_multiply_op,
  enum xnn_operator_type expected_operator_type,
  size_t batch_size,
  size_t m,
  size_t k,
  size_t n,
  size_t* workspace_size,
  size_t* workspace_alignment,
  uint32_t log2_input1_element_size,
  uint32_t log2_input2_element_size,
  uint32_t bias_element_size,
  uint32_t log2_output_element_size,
  const void* params,
  size_t params_size,
  size_t num_threads)
{
  if (batch_matrix_multiply_op->type != expected_operator_type) {
    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
      xnn_operator_type_to_string(expected_operator_type),
      xnn_operator_type_to_string(batch_matrix_multiply_op->type));
    return xnn_status_invalid_parameter;
  }
  batch_matrix_multiply_op->state = xnn_run_state_invalid;
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(batch_matrix_multiply_op->type));
    return xnn_status_uninitialized;
  }
  if (m == 0) {
    xnn_log_error(
      "failed to reshape %s operator with %zu rows: number of rows must be non-zero",
      xnn_operator_type_to_string(batch_matrix_multiply_op->type), m);
    return xnn_status_invalid_parameter;
  }
  if (k == 0) {
    xnn_log_error(
      "failed to reshape %s operator with %zu columns: number of columns must be non-zero",
      xnn_operator_type_to_string(batch_matrix_multiply_op->type), k);
    return xnn_status_invalid_parameter;
  }
  if (n == 0) {
    xnn_log_error(
      "failed to reshape %s operator with %zu columns: number of columns must be non-zero",
      xnn_operator_type_to_string(batch_matrix_multiply_op->type), n);
    return xnn_status_invalid_parameter;
  }
  // An empty batch is a no-op: mark the operator to skip computation entirely.
  if (batch_size == 0) {
    batch_matrix_multiply_op->state = xnn_run_state_skip;
    return xnn_status_success;
  }
  const uint32_t nr = batch_matrix_multiply_op->ukernel.gemm.nr;
  const uint32_t kr = batch_matrix_multiply_op->ukernel.gemm.kr;
  const uint32_t sr = batch_matrix_multiply_op->ukernel.gemm.sr;
  // Round dimensions up to the microkernel tiling so the packed buffer covers
  // full tiles: n to a multiple of nr, k to a power-of-2 multiple of kr*sr.
  const size_t n_stride = round_up(n, nr);
  const size_t k_stride = round_up_po2(k, kr * sr);
  // Workspace holds, per batch, the packed bias row plus the packed [n, k] panel.
  *workspace_size = batch_size * (n_stride * bias_element_size + ((n_stride * k_stride) << log2_input2_element_size));
  *workspace_alignment = XNN_ALLOCATION_ALIGNMENT;
  uint32_t mr = batch_matrix_multiply_op->ukernel.gemm.mr;
  struct xnn_hmp_gemm_ukernel *gemm_cases = batch_matrix_multiply_op->ukernel.gemm.gemm_cases;
  // A single output row can use the specialized MR=1 microkernel when present.
  if (m == 1 && batch_matrix_multiply_op->ukernel.gemm.gemm_cases[0].function[XNN_UARCH_DEFAULT] != NULL) {
    mr = 1;
  }
  assert(mr != 0 && mr <= XNN_MAX_MR);
  struct xnn_hmp_gemm_ukernel gemm_ukernel = gemm_cases[mr-1];
  assert(batch_matrix_multiply_op->ukernel.gemm.packw_gemm_goi != NULL);
  batch_matrix_multiply_op->context.packw_gemm_goi = (struct packw_gemm_goi_context) {
    .g = batch_size,
    .kc = k,
    .nr = nr,
    .kr = kr,
    .sr = sr,
    .k_stride = k << log2_input1_element_size,
    .bias = NULL,
    .b_stride = bias_element_size,
    .w_stride = bias_element_size + (k_stride << log2_input1_element_size),
    .packw_gemm_goi = batch_matrix_multiply_op->ukernel.gemm.packw_gemm_goi,
  };
  size_t w_stride = bias_element_size + (round_up_po2(k, kr * sr) << log2_input1_element_size);
  batch_matrix_multiply_op->context.gemm = (struct gemm_context) {
    .k_scaled = k << log2_input1_element_size,
    .a_stride = k << log2_input1_element_size,
    .ga_stride = m * (k << log2_input1_element_size),
    .w_stride = w_stride,
    .gw_stride = w_stride * round_up(n, nr),
    .cm_stride = n << log2_output_element_size,
    .cn_stride = nr << log2_output_element_size,
    .gc_stride = (m * n) << log2_output_element_size,
    .log2_csize = log2_output_element_size,
    .ukernel = gemm_ukernel,
  };
  memcpy(&batch_matrix_multiply_op->context.gemm.params, params, params_size);
  batch_matrix_multiply_op->context.gemm.fused_params = &batch_matrix_multiply_op->context.gemm.params;
  // compute[0]: pack the second input (weights) in GOI layout.
  batch_matrix_multiply_op->compute[0].type = xnn_parallelization_type_1d_tile_1d;
  batch_matrix_multiply_op->compute[0].task_1d_tile_1d = (pthreadpool_task_1d_tile_1d_t) xnn_compute_packw_gemm_goi;
  batch_matrix_multiply_op->compute[0].context_offset =
    offsetof(struct xnn_operator, context.packw_gemm_goi) - offsetof(struct xnn_operator, context);
  batch_matrix_multiply_op->compute[0].range[0] = n;
  // TODO(zhin): figure out if we can parallelize this packing.
  batch_matrix_multiply_op->compute[0].tile[0] = n;
#if XNN_TEST_MODE
  const size_t nc = nr;
#else
  // Shrink the N tile so each thread gets roughly target_tiles_per_thread tiles.
  size_t nc = n;
  if (num_threads > 1) {
    const size_t num_other_tiles = divide_round_up(m, mr);
    const size_t target_tiles_per_thread = 5;
    const size_t max_nc = divide_round_up(n * num_other_tiles, num_threads * target_tiles_per_thread);
    if (max_nc < nc) {
      nc = min(nc, divide_round_up(nc, max_nc * nr) * nr);
    }
  }
#endif
  // compute[1]: the batched GEMM itself, dispatched per (batch, M tile, N tile);
  // use the uarch-aware task type when heterogeneous microkernels are in play.
#if XNN_MAX_UARCH_TYPES > 1
  if (xnn_is_hmp_gemm_ukernel(gemm_ukernel)) {
    batch_matrix_multiply_op->compute[1].type = xnn_parallelization_type_3d_tile_2d_with_uarch;
    batch_matrix_multiply_op->compute[1].task_3d_tile_2d_with_id =
      (pthreadpool_task_3d_tile_2d_with_id_t) xnn_compute_hmp_grouped_gemm;
  } else {
    batch_matrix_multiply_op->compute[1].type = xnn_parallelization_type_3d_tile_2d;
    batch_matrix_multiply_op->compute[1].task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_gemm;
  }
#else
  batch_matrix_multiply_op->compute[1].type = xnn_parallelization_type_3d_tile_2d;
  batch_matrix_multiply_op->compute[1].task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_gemm;
#endif
  batch_matrix_multiply_op->compute[1].range[0] = batch_size;
  batch_matrix_multiply_op->compute[1].range[1] = m;
  batch_matrix_multiply_op->compute[1].range[2] = n;
  batch_matrix_multiply_op->compute[1].tile[0] = mr;
  batch_matrix_multiply_op->compute[1].tile[1] = nc;
  batch_matrix_multiply_op->state = xnn_run_state_needs_setup;
  return xnn_status_success;
}
// Reshapes an F32 batch matrix multiplication operator for the given
// problem dimensions (batch_size x [m x k] * [k x n]).
//
// Thin type-specific wrapper: forwards to the generic
// reshape_batch_matrix_multiply_nc with F32 element sizes and the
// operator's f32 min/max activation parameters. Reports the required
// scratch size/alignment through workspace_size / workspace_alignment.
enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f32(
  xnn_operator_t batch_matrix_multiply_op,
  size_t batch_size,
  size_t m,
  size_t k,
  size_t n,
  size_t* workspace_size,
  size_t* workspace_alignment,
  pthreadpool_t threadpool)
{
  // The generic reshape only needs the thread count, not the pool itself.
  const size_t num_threads = pthreadpool_get_threads_count(threadpool);
  return reshape_batch_matrix_multiply_nc(
    batch_matrix_multiply_op, xnn_operator_type_batch_matrix_multiply_nc_f32,
    batch_size, m, k, n,
    workspace_size, workspace_alignment,
    /*log2_input1_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
    /*log2_input2_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
    /*bias_element_size=*/sizeof(float),
    /*log2_output_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
    &batch_matrix_multiply_op->params.f32_minmax,
    sizeof(batch_matrix_multiply_op->params.f32_minmax),
    num_threads);
}
// Binds the input, weight, workspace, and output pointers to a batch
// matrix multiplication operator that has already been reshaped.
//
// The operator must be of expected_operator_type and in the needs_setup
// or ready state (i.e. reshape has run); re-binding with different
// pointers is allowed. On success the operator transitions to ready.
static enum xnn_status setup_batch_matrix_multiply_nc(
  xnn_operator_t batch_matrix_multiply_op,
  enum xnn_operator_type expected_operator_type,
  void* workspace,
  const void* input1,
  const void* input2,
  void* output)
{
  if (batch_matrix_multiply_op->type != expected_operator_type) {
    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
      xnn_operator_type_to_string(expected_operator_type),
      xnn_operator_type_to_string(batch_matrix_multiply_op->type));
    return xnn_status_invalid_parameter;
  }
  // Guard clauses on the run state; any state other than skip/invalid
  // means the operator was reshaped and may be (re-)bound below.
  const enum xnn_run_state run_state = batch_matrix_multiply_op->state;
  if (run_state == xnn_run_state_skip) {
    return xnn_status_success;
  }
  if (run_state == xnn_run_state_invalid) {
    xnn_log_error(
      "failed to setup %s operator: operator has not been reshaped yet",
      xnn_operator_type_to_string(batch_matrix_multiply_op->type));
    return xnn_status_invalid_state;
  }
  // The packing pass reads input2 (the second operand / weights) and
  // writes packed weights into the caller-provided workspace; the GEMM
  // pass reads input1 and the packed weights and writes to output.
  batch_matrix_multiply_op->context.packw_gemm_goi.kernel = input2;
  batch_matrix_multiply_op->context.packw_gemm_goi.bias = NULL;
  batch_matrix_multiply_op->context.packw_gemm_goi.packed_weights = workspace;
  batch_matrix_multiply_op->context.gemm.a = input1;
  batch_matrix_multiply_op->context.gemm.packed_w = workspace;
  batch_matrix_multiply_op->context.gemm.c = output;
  batch_matrix_multiply_op->state = xnn_run_state_ready;
  return xnn_status_success;
}
// Attaches F32 input/weight/workspace/output pointers to a previously
// reshaped batch matrix multiplication operator. Thin type-specific
// wrapper over setup_batch_matrix_multiply_nc; workspace must provide the
// size/alignment reported by xnn_reshape_batch_matrix_multiply_nc_f32.
enum xnn_status xnn_setup_batch_matrix_multiply_nc_f32(
  xnn_operator_t batch_matrix_multiply_op,
  void* workspace,
  const float* input1,
  const float* input2,
  float* output)
{
  return setup_batch_matrix_multiply_nc(
    batch_matrix_multiply_op, xnn_operator_type_batch_matrix_multiply_nc_f32,
    workspace, input1, input2, output);
}
| 12,231
| 36.179331
| 118
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.