repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Element-wise square (y = x * x) of `batch` bytes of f32 data, WAsm SIMD,
// 4 floats per main-loop iteration. `params` is unused by this kernel.
void xnn_f32_vsqr_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Full 4-lane vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_f32x4_mul(vin, vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover floats: the vector load may read past the end of `input`
  // (declared via XNN_OOB_READS); stores write exactly `batch` elements.
  if (batch != 0) {
    const v128_t vin = wasm_v128_load(input);
    v128_t vsq = wasm_f32x4_mul(vin, vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vsq, 0);
      vsq = wasm_v64x2_shuffle(vsq, vsq, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vsq, 0);
    }
  }
}
| 1,312
| 24.25
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/f32-vunary/gen/f32-vsqr-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vunary/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Element-wise square (y = x * x) of `batch` bytes of f32 data, WAsm SIMD,
// 8 floats (two vectors) per main-loop iteration. `params` is unused.
void xnn_f32_vsqr_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Main loop: two 4-lane vectors (8 floats) per iteration.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vin_lo = wasm_v128_load(input);
    const v128_t vin_hi = wasm_v128_load(input + 4);
    input += 8;

    wasm_v128_store(output, wasm_f32x4_mul(vin_lo, vin_lo));
    wasm_v128_store(output + 4, wasm_f32x4_mul(vin_hi, vin_hi));
    output += 8;

    batch -= 8 * sizeof(float);
  }
  // One full vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vin = wasm_v128_load(input);
    input += 4;
    wasm_v128_store(output, wasm_f32x4_mul(vin, vin));
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // 1-3 leftover floats: the vector load may read past the end of `input`
  // (declared via XNN_OOB_READS); stores write exactly `batch` elements.
  if (batch != 0) {
    const v128_t vin = wasm_v128_load(input);
    v128_t vsq = wasm_f32x4_mul(vin, vin);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vsq, 0);
      vsq = wasm_v64x2_shuffle(vsq, vsq, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vsq, 0);
    }
  }
}
| 1,714
| 25.796875
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`
// (output[i] = input[i] << shift), NEON, 16 elements per main-loop iteration.
// `batch` counts uint16 elements.
void xnn_i16_vlshift_ukernel__neon_x16(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);  // a shift of >= 16 would clear every lane
  // vshlq_u16 shifts left by the per-lane amount in vshift (positive here).
  const int16x8_t vshift = vdupq_n_s16((int16_t) shift);
  // Main loop: two 8-lane vectors (16 elements) per iteration.
  for (; batch >= 16; batch -= 16) {
    const uint16x8_t vi0 = vld1q_u16(input); input += 8;
    const uint16x8_t vi1 = vld1q_u16(input); input += 8;
    const uint16x8_t vout0 = vshlq_u16(vi0, vshift);
    const uint16x8_t vout1 = vshlq_u16(vi1, vshift);
    vst1q_u16(output, vout0); output += 8;
    vst1q_u16(output, vout1); output += 8;
  }
  // Remainder of full vectors
  for (; batch >= 8; batch -= 8) {
    const uint16x8_t vi = vld1q_u16(input); input += 8;
    const uint16x8_t vout = vshlq_u16(vi, vshift);
    vst1q_u16(output, vout); output += 8;
  }
  // Remainder of 1 to 7 batch
  // NOTE(review): the full-vector load below reads 8 elements even when
  // batch < 8, i.e. it may read past the end of `input`; only `batch`
  // elements are stored. Confirm callers guarantee the tail is readable.
  if XNN_UNLIKELY(batch != 0) {
    const uint16x8_t vi = vld1q_u16(input);
    const uint16x8_t vout = vshlq_u16(vi, vshift);
    uint16x4_t vout_lo = vget_low_u16(vout);
    if (batch & 4) {
      // Store 4 lanes, then switch to the high half for the remaining 1-3.
      vst1_u16(output, vout_lo); output += 4;
      vout_lo = vget_high_u16(vout);
    }
    if (batch & 2) {
      // Store 2 lanes as a single 32-bit unit, then rotate them away.
      vst1_lane_u32((void*) output, vreinterpret_u32_u16(vout_lo), 0); output += 2;
      vout_lo = vext_u16(vout_lo, vout_lo, 2);
    }
    if (batch & 1){
      vst1_lane_u16(output, vout_lo, 0);
    }
  }
}
| 1,813
| 24.914286
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`,
// NEON, 24 elements (three vectors) per main-loop iteration.
void xnn_i16_vlshift_ukernel__neon_x24(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // vshlq_u16 shifts left by the (positive) per-lane amount in this vector.
  const int16x8_t vshift_amount = vdupq_n_s16((int16_t) shift);

  // Main loop: three 8-lane vectors (24 elements) per iteration.
  while (batch >= 24) {
    const uint16x8_t vdata0 = vld1q_u16(input);
    const uint16x8_t vdata1 = vld1q_u16(input + 8);
    const uint16x8_t vdata2 = vld1q_u16(input + 16);
    input += 24;

    vst1q_u16(output, vshlq_u16(vdata0, vshift_amount));
    vst1q_u16(output + 8, vshlq_u16(vdata1, vshift_amount));
    vst1q_u16(output + 16, vshlq_u16(vdata2, vshift_amount));
    output += 24;

    batch -= 24;
  }
  // One full vector at a time.
  while (batch >= 8) {
    const uint16x8_t vdata = vld1q_u16(input); input += 8;
    vst1q_u16(output, vshlq_u16(vdata, vshift_amount)); output += 8;
    batch -= 8;
  }
  // 1 to 7 leftover elements: the full-vector load may read past the end of
  // `input`, but only `batch` elements are stored.
  if (batch != 0) {
    const uint16x8_t vdata = vld1q_u16(input);
    const uint16x8_t vres = vshlq_u16(vdata, vshift_amount);
    uint16x4_t vres_lo = vget_low_u16(vres);
    if (batch & 4) {
      vst1_u16(output, vres_lo); output += 4;
      vres_lo = vget_high_u16(vres);
    }
    if (batch & 2) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u16(vres_lo), 0); output += 2;
      vres_lo = vext_u16(vres_lo, vres_lo, 2);
    }
    if (batch & 1) {
      vst1_lane_u16(output, vres_lo, 0);
    }
  }
}
| 1,966
| 25.945205
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`
// (output[i] = input[i] << shift), NEON, 32 elements per main-loop iteration.
// `batch` counts uint16 elements.
void xnn_i16_vlshift_ukernel__neon_x32(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);  // a shift of >= 16 would clear every lane
  // vshlq_u16 shifts left by the per-lane amount in vshift (positive here).
  const int16x8_t vshift = vdupq_n_s16((int16_t) shift);
  // Main loop: four 8-lane vectors (32 elements) per iteration.
  for (; batch >= 32; batch -= 32) {
    const uint16x8_t vi0 = vld1q_u16(input); input += 8;
    const uint16x8_t vi1 = vld1q_u16(input); input += 8;
    const uint16x8_t vi2 = vld1q_u16(input); input += 8;
    const uint16x8_t vi3 = vld1q_u16(input); input += 8;
    const uint16x8_t vout0 = vshlq_u16(vi0, vshift);
    const uint16x8_t vout1 = vshlq_u16(vi1, vshift);
    const uint16x8_t vout2 = vshlq_u16(vi2, vshift);
    const uint16x8_t vout3 = vshlq_u16(vi3, vshift);
    vst1q_u16(output, vout0); output += 8;
    vst1q_u16(output, vout1); output += 8;
    vst1q_u16(output, vout2); output += 8;
    vst1q_u16(output, vout3); output += 8;
  }
  // Remainder of full vectors
  for (; batch >= 8; batch -= 8) {
    const uint16x8_t vi = vld1q_u16(input); input += 8;
    const uint16x8_t vout = vshlq_u16(vi, vshift);
    vst1q_u16(output, vout); output += 8;
  }
  // Remainder of 1 to 7 batch
  // NOTE(review): the full-vector load below reads 8 elements even when
  // batch < 8, i.e. it may read past the end of `input`; only `batch`
  // elements are stored. Confirm callers guarantee the tail is readable.
  if XNN_UNLIKELY(batch != 0) {
    const uint16x8_t vi = vld1q_u16(input);
    const uint16x8_t vout = vshlq_u16(vi, vshift);
    uint16x4_t vout_lo = vget_low_u16(vout);
    if (batch & 4) {
      // Store 4 lanes, then switch to the high half for the remaining 1-3.
      vst1_u16(output, vout_lo); output += 4;
      vout_lo = vget_high_u16(vout);
    }
    if (batch & 2) {
      // Store 2 lanes as a single 32-bit unit, then rotate them away.
      vst1_lane_u32((void*) output, vreinterpret_u32_u16(vout_lo), 0); output += 2;
      vout_lo = vext_u16(vout_lo, vout_lo, 2);
    }
    if (batch & 1){
      vst1_lane_u16(output, vout_lo, 0);
    }
  }
}
| 2,119
| 26.894737
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`,
// NEON, one 8-lane vector per main-loop iteration.
void xnn_i16_vlshift_ukernel__neon_x8(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // vshlq_u16 shifts left by the (positive) per-lane amount in this vector.
  const int16x8_t vshift_amount = vdupq_n_s16((int16_t) shift);

  // Full 8-element vectors.
  while (batch >= 8) {
    const uint16x8_t vdata = vld1q_u16(input); input += 8;
    vst1q_u16(output, vshlq_u16(vdata, vshift_amount)); output += 8;
    batch -= 8;
  }
  // 1 to 7 leftover elements: the full-vector load may read past the end of
  // `input`, but only `batch` elements are stored.
  if (batch != 0) {
    const uint16x8_t vdata = vld1q_u16(input);
    const uint16x8_t vres = vshlq_u16(vdata, vshift_amount);
    uint16x4_t vres_lo = vget_low_u16(vres);
    if (batch & 4) {
      vst1_u16(output, vres_lo); output += 4;
      vres_lo = vget_high_u16(vres);
    }
    if (batch & 2) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u16(vres_lo), 0); output += 2;
      vres_lo = vext_u16(vres_lo, vres_lo, 2);
    }
    if (batch & 1) {
      vst1_lane_u16(output, vres_lo, 0);
    }
  }
}
| 1,463
| 23.4
| 83
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`
// (output[i] = (uint16_t)(input[i] << shift)), one element per iteration.
// `batch` counts uint16 elements and must be non-zero; `shift` must be < 16.
void xnn_i16_vlshift_ukernel__scalar_x1(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // Shifted-out high bits are discarded by the uint16 store.
  while (batch != 0) {
    const uint16_t v = *input++;
    *output++ = (uint16_t) (v << shift);
    batch--;
  }
}
| 789
| 20.351351
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`,
// scalar, two elements per main-loop iteration.
void xnn_i16_vlshift_ukernel__scalar_x2(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // Main loop: two elements per iteration.
  while (batch >= 2) {
    const uint16_t x0 = input[0];
    const uint16_t x1 = input[1];
    input += 2;

    output[0] = (uint16_t) (x0 << shift);
    output[1] = (uint16_t) (x1 << shift);
    output += 2;

    batch -= 2;
  }
  // At most one leftover element.
  if (batch != 0) {
    *output = (uint16_t) (*input << shift);
  }
}
| 1,061
| 20.673469
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-scalar-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`,
// scalar, three elements per main-loop iteration.
void xnn_i16_vlshift_ukernel__scalar_x3(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // Main loop: three elements per iteration.
  while (batch >= 3) {
    const uint16_t x0 = input[0];
    const uint16_t x1 = input[1];
    const uint16_t x2 = input[2];
    input += 3;

    output[0] = (uint16_t) (x0 << shift);
    output[1] = (uint16_t) (x1 << shift);
    output[2] = (uint16_t) (x2 << shift);
    output += 3;

    batch -= 3;
  }
  // Up to two leftover elements.
  while (batch != 0) {
    *output++ = (uint16_t) (*input++ << shift);
    batch--;
  }
}
| 1,160
| 21.326923
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/i16-vlshift/gen/i16-vlshift-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/i16-vlshift/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlshift.h>
// Left-shift each uint16 element of `input` by `shift` bits into `output`,
// scalar, four elements per main-loop iteration.
void xnn_i16_vlshift_ukernel__scalar_x4(
    size_t batch,
    const uint16_t* input,
    uint16_t* output,
    uint32_t shift)
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 16);

  // Main loop: four elements per iteration.
  while (batch >= 4) {
    const uint16_t x0 = input[0];
    const uint16_t x1 = input[1];
    const uint16_t x2 = input[2];
    const uint16_t x3 = input[3];
    input += 4;

    output[0] = (uint16_t) (x0 << shift);
    output[1] = (uint16_t) (x1 << shift);
    output[2] = (uint16_t) (x2 << shift);
    output[3] = (uint16_t) (x3 << shift);
    output += 4;

    batch -= 4;
  }
  // Up to three leftover elements.
  while (batch != 0) {
    *output++ = (uint16_t) (*input++ << shift);
    batch--;
  }
}
| 1,259
| 21.909091
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-exp-neonfp16arith-rr2-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Compute exph(x) element-wise over a buffer of IEEE half-precision values
// using NEON FP16 arithmetic (round-reduce-2, degree-3 polynomial).
// `n` is the buffer size in BYTES and must be a multiple of 8 fp16 elements;
// `input` and `output` point to fp16 data stored as uint16.
void xnn_math_f16_exp__neonfp16arith_rr2_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large half-precision number used to force round-to-nearest-integer of
  // x / log(2) via the add-then-subtract trick below.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x6600)));  // 0x1.800p+10h
  // The smallest x for which exph(x) is non-zero.
  const float16x8_t vzero_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xCC55)));  // -0x1.154p+4h
  // The largest x for which exph(x) is finite.
  const float16x8_t vinf_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x498F)));  // 0x1.63Cp+3h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5)));  // 0x1.714p+0h
  // log(2) split into hi/lo parts for Cody-Waite range reduction.
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C)));  // -0x1.630p-1h
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x0AF4)));  // 0x1.BD0p-13h
  const float16x8_t vplus_inf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x7C00)));
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x315B)));  // 0x1.56Cp-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // Clamp bounds for the biased exponent of the "normal" scale factor sn
  // (expressed directly in fp16 exponent-field bits).
  const int16x8_t vmin_exponent = vmovq_n_s16(INT16_C(0xC800));
  const int16x8_t vmax_exponent = vreinterpretq_s16_f16(vone);
  const int16x8_t vdefault_exponent = vmax_exponent;

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**9, i.e.
    // |x| <= 0x1.630p+8 = 355), but that's ok, because inputs outside of [-17.328125, 11.1171875] underflow or overflow
    // exph(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -17.328125 <= x <= 11.1171875, and -25 <= n <= 16 accordingly.
    // We need to use two numbers rather than one because a normalized half-precision exponent must be in [-14, 15]
    // range, which is insufficient to cover [-25, 16] range of n.
    // - When n is within [-14, 15], sn == 2**n and so == 1.0.
    // - When n < -14, sn == 2**(-14) and so == 2**(n + 14).
    // - When n > 15, sn == 2**15 and so == 2**(n - 15).
    // Shift the integer part of n (sitting in the low mantissa bits after the
    // magic-bias addition) into the fp16 exponent field, then split it into
    // the clamped "normal" part (ven) and the overflow part (veo).
    int16x8_t veo = vshlq_n_s16(vreinterpretq_s16_f16(vn), 10);
    int16x8_t ven = vmaxq_s16(veo, vmin_exponent);
    ven = vminq_s16(ven, vmax_exponent);
    veo = vsubq_s16(veo, ven);
    const float16x8_t vsn = vreinterpretq_f16_s16(vaddq_s16(ven, vdefault_exponent));
    const float16x8_t vso = vreinterpretq_f16_s16(vaddq_s16(veo, vdefault_exponent));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmaq_f16(vone, vp, vt);

    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (1 + t * (c2 + t * c3)))
    //     = sn * (so + (t * so) * (1 + t * (c2 + t * c3)))
    //     = sn * (so + (t * so) * p)
    vt = vmulq_f16(vt, vso);
    float16x8_t vf = vmulq_f16(vsn, vfmaq_f16(vso, vt, vp));

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcltq_f16(vx, vzero_cutoff)));
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vbslq_f16(vcgtq_f16(vx, vinf_cutoff), vplus_inf, vf);
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 5,008
| 50.112245
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expm1minus-avx2-rr1-p2.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Compute expm1h(x) = exp(x) - 1 for a buffer of fp16 values, evaluated in
// single precision via AVX2 + F16C (round-reduce-1, degree-2 polynomial).
// `n` is the buffer size in BYTES and must be a multiple of 8 fp16 elements.
// Inputs are assumed restricted to [-8.3203125, 0] (see comments below).
void xnn_math_f16_expm1minus__avx2_rr1_p2(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.0A4000p+3f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFAEEp-2f);
  const __m256 vc1 = _mm256_set1_ps(0x1.028C1Cp0f);
  const __m256 vone = _mm256_set1_ps(1.0f);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    // The function saturates at -1 for large negative inputs: expm1h(x) == -1.0h for x <= sat_cutoff ~= -8.3203125.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1m(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = _mm256_max_ps(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
    // the large number back. The addition is combined with multiplication by log2e into a single FMA instruction. The
    // trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**9, i.e.
    // |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x are restricted to [-8.3203125, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -8.3203125 <= x <= 0.0, and -12 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-2 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (c1 + t * c2)
    //        = t * p
    const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * p) - 1
    //              = (s - 1) + (s * t) * p
    //              = (t * s) * p + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_sub_ps(vs, vone);
    const __m256 vf = _mm256_fmadd_ps(vp, vt, vs);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 3,635
| 42.807229
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expm1minus-avx2-rr1-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Compute expm1h(x) = exp(x) - 1 for a buffer of fp16 values, evaluated in
// single precision via AVX2 + F16C (round-reduce-1, degree-3 polynomial).
// `n` is the buffer size in BYTES and must be a multiple of 8 fp16 elements.
// Inputs are assumed restricted to [-8.3203125, 0] (see comments below).
void xnn_math_f16_expm1minus__avx2_rr1_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.0A4000p+3f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (c1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const __m256 vc3 = _mm256_set1_ps(0x1.5554DCp-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.01EBB2p-1f);
  const __m256 vc1 = _mm256_set1_ps(0x1.0002F2p0f);
  const __m256 vone = _mm256_set1_ps(1.0f);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    // The function saturates at -1 for large negative inputs: expm1h(x) == -1.0h for x <= sat_cutoff ~= -8.3203125.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1m(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = _mm256_max_ps(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
    // the large number back. The addition is combined with multiplication by log2e into a single FMA instruction. The
    // trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**9, i.e.
    // |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x are restricted to [-8.3203125, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -8.3203125 <= x <= 0.0, and -12 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (c1 + t * (c2 + t * c3))
    //        = t * p
    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * p) - 1
    //              = (s - 1) + (s * t) * p
    //              = (t * s) * p + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_sub_ps(vs, vone);
    const __m256 vf = _mm256_fmadd_ps(vp, vt, vs);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 3,733
| 42.929412
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expm1minus-neonfp16arith-rr1-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Compute expm1h(x) = exp(x) - 1 for a buffer of fp16 values using native
// NEON FP16 arithmetic (round-reduce-1, degree-3 polynomial).
// `n` is the buffer size in BYTES and must be a multiple of 8 fp16 elements.
// Inputs are assumed restricted to [-8.3203125, 0] (see comments below).
void xnn_math_f16_expm1minus__neonfp16arith_rr1_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC829)));  // -0x1.0A4p+3h
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5)));  // 0x1.714p+0h
  const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C)));  // -0x1.630p-1h
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x315B)));  // 0x1.56Cp-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // The function saturates at -1 for large negative inputs: expm1h(x) == -1.0h for x <= sat_cutoff ~= -8.3203125.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1m(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f16(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
    // the large number back. The addition is combined with multiplication by log2e into a single FMA instruction. The
    // trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**9, i.e.
    // |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x are restricted to [-8.3203125, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -8.3203125 <= x <= 0.0, and -12 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    // The shift moves the integer part of n (in the low mantissa bits after
    // the magic-bias addition) into the fp16 exponent field.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * c3))
    //        = t + t * (t * (c2 + t * c3)) = t + t * p
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vmulq_f16(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f16(vt, vs);
    const float16x8_t vsm1 = vsubq_f16(vs, vone);
    vp = vfmaq_f16(vt, vp, vt);
    const float16x8_t vf = vaddq_f16(vp, vsm1);

    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,033
| 47.60241
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expm1minus-neonfp16arith-rr2-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes expm1(x) == exp(x) - 1 for fp16 inputs restricted to (-inf, 0],
// using NEON FP16 arithmetic, Cody-Waite two-constant range reduction (rr2),
// and a degree-3 polynomial approximation (p3).
//
// n      - byte size of the input (and output) array; must be a multiple of
//          8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 results, written as uint16_t.
void xnn_math_f16_expm1minus__neonfp16arith_rr2_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC829))); // -0x1.0A4p+3h
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F))); // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5))); // 0x1.714p+0h
  // Two-constant Cody-Waite representation of -log(2): a high part with a
  // short mantissa (so n * hi is exact) plus a low correction term.
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C))); // -0x1.630p-1h
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x0AF4))); // 0x1.BD0p-13h
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x315B))); // 0x1.56Cp-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808))); // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // The function saturates at -1 for large negative inputs: expm1h(x) == -1.0h for x <= sat_cutoff ~= -8.3203125.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1m(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f16(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
    // the large number back. The addition is combined with multiplication by log2e into a single FMA instruction. The
    // trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**9, i.e.
    // |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x are restricted to [-8.3203125, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -8.3203125 <= x <= 0.0, and -12 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * c3))
    //        = t + t * (t * (c2 + t * c3)) = t + t * p
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vmulq_f16(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f16(vt, vs);
    const float16x8_t vsm1 = vsubq_f16(vs, vone);
    vp = vfmaq_f16(vt, vp, vt);
    const float16x8_t vf = vaddq_f16(vp, vsm1);

    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,296
| 48.965116
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-avx2-rr1-p2.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// AVX2 + F16C + FMA: halves are widened to fp32, exp is evaluated with a
// single-constant (rr1) range reduction and a degree-2 polynomial (p2), and
// the results are narrowed back to fp16.
//
// n      - byte size of the input (and output) array; per the assert below,
//          must be a multiple of 8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 outputs, written as uint16_t.
void xnn_math_f16_expminus__avx2_rr1_p2(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * c2) on [-log(2)/2, log(2)/2]
  const __m256 vc2 = _mm256_set1_ps(0x1.FF3A32p-2f);
  const __m256 vc1 = _mm256_set1_ps(0x1.039E10p+0f);
  // The smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  // Each iteration consumes and produces 8 fp16 (uint16_t) elements, i.e.
  // 8 * sizeof(uint16_t) bytes. The decrement must match the assert above:
  // with the previous `8 * sizeof(float)` step, a valid n that is an odd
  // multiple of 16 bytes would wrap around (size_t underflow) and the loop
  // would never terminate.
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 0] underflow expf(x). We fixup the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * c2))
    //          = s + (t * s) * (c1 + t * c2)
    //          = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 3,192
| 42.148649
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-avx2-rr1-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// AVX2 + F16C + FMA: halves are widened to fp32, exp is evaluated with a
// single-constant (rr1) range reduction and a degree-3 polynomial (p3), and
// the results are narrowed back to fp16.
//
// n      - byte size of the input (and output) array; per the assert below,
//          must be a multiple of 8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 outputs, written as uint16_t.
void xnn_math_f16_expminus__avx2_rr1_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * c3)) on [-log(2)/2, log(2)/2]
  const __m256 vc3 = _mm256_set1_ps(0x1.5249A6p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.021D60p-1f);
  const __m256 vc1 = _mm256_set1_ps(0x1.000CD6p+0f);
  // The smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  // Each iteration consumes and produces 8 fp16 (uint16_t) elements, i.e.
  // 8 * sizeof(uint16_t) bytes. The decrement must match the assert above:
  // with the previous `8 * sizeof(float)` step, a valid n that is an odd
  // multiple of 16 bytes would wrap around (size_t underflow) and the loop
  // would never terminate.
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 0] underflow expf(x). We fixup the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * (c2 + t * c3)) = 1 + t * p
    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * (c2 + t * c3)))
    //          = s + (t * s) * (c1 + t * (c2 + t * c3))
    //          = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 3,300
| 42.434211
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-neonfp16arith-rr1-p2.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// NEON FP16 arithmetic, with single-constant (rr1) range reduction and a
// degree-2 polynomial approximation (p2).
//
// n      - byte size of the input (and output) array; must be a multiple of
//          8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 results, written as uint16_t.
void xnn_math_f16_expminus__neonfp16arith_rr1_p2(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F))); // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5))); // 0x1.714p+0h
  const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C))); // -0x1.630p-1h
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9))); // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C0E))); // 0x1.038p+0h
  // The smallest x for which exph(x) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA))); // -0x1.368p+3h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the result
    // to an integer, then subtracing the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 0.0] underflow exph(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2);

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * c2))
    //          = s + (t * s) * (c1 + t * c2)
    //          = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t vf = vfmaq_f16(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcltq_f16(vx, vdenorm_cutoff)));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 3,437
| 45.459459
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-neonfp16arith-rr1-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// NEON FP16 arithmetic, with single-constant (rr1) range reduction and a
// degree-3 polynomial approximation (p3).
//
// n      - byte size of the input (and output) array; must be a multiple of
//          8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 results, written as uint16_t.
void xnn_math_f16_expminus__neonfp16arith_rr1_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F))); // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5))); // 0x1.714p+0h
  const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C))); // -0x1.630p-1h
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * (1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3156))); // 0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808))); // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
  // The smallest x for which exph(x) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA))); // -0x1.368p+3h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the result
    // to an integer, then subtracing the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 0.0] underflow exph(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2);

    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (1 + t * (c2 + t * c3)) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmaq_f16(vone, vp, vt);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (1 + t * (c2 + t * c3)))
    //          = s + (t * s) * (1 + t * (c2 + t * c3))
    //          = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t vf = vfmaq_f16(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcltq_f16(vx, vdenorm_cutoff)));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 3,595
| 46.315789
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-neonfp16arith-rr2-p2.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// NEON FP16 arithmetic, with Cody-Waite two-constant (rr2) range reduction
// and a degree-2 polynomial approximation (p2).
//
// n      - byte size of the input (and output) array; must be a multiple of
//          8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 results, written as uint16_t.
void xnn_math_f16_expminus__neonfp16arith_rr2_p2(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F))); // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5))); // 0x1.714p+0h
  // Two-constant Cody-Waite representation of -log(2): a high part with a
  // short mantissa (so n * hi is exact) plus a low correction term.
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C))); // -0x1.630p-1h
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x0AF4))); // 0x1.BD0p-13h
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9))); // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C0E))); // 0x1.038p+0h
  // The smallest x for which exph(x) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA))); // -0x1.368p+3h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the result
    // to an integer, then subtracing the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 0.0] underflow exph(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * c2))
    //          = s + (t * s) * (c1 + t * c2)
    //          = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t vf = vfmaq_f16(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcltq_f16(vx, vdenorm_cutoff)));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 3,700
| 47.064935
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-expminus-neonfp16arith-rr2-p3.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes exp(x) for fp16 inputs restricted to (-inf, 0] ("expminus") using
// NEON FP16 arithmetic, with Cody-Waite two-constant (rr2) range reduction
// and a degree-3 polynomial approximation (p3).
//
// n      - byte size of the input (and output) array; must be a multiple of
//          8 * sizeof(uint16_t), i.e. 8 fp16 elements.
// input  - pointer to fp16 inputs, read as uint16_t.
// output - pointer to fp16 results, written as uint16_t.
void xnn_math_f16_expminus__neonfp16arith_rr2_p3(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F))); // 0x1.83Cp+10h
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3DC5))); // 0x1.714p+0h
  // Two-constant Cody-Waite representation of -log(2): a high part with a
  // short mantissa (so n * hi is exact) plus a low correction term.
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB98C))); // -0x1.630p-1h
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x0AF4))); // 0x1.BD0p-13h
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * (1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3156))); // 0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808))); // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
  // The smallest x for which exph(x) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA))); // -0x1.368p+3h

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which cause rounding of the result
    // to an integer, then subtracing the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |x| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 0.0] underflow exph(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= x <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (1 + t * (c2 + t * c3)) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmaq_f16(vone, vp, vt);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (1 + t * (c2 + t * c3)))
    //          = s + (t * s) * (1 + t * (c2 + t * c3))
    //          = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t vf = vfmaq_f16(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcltq_f16(vx, vdenorm_cutoff)));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 3,858
| 47.848101
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-neon-int16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Converts IEEE fp16 values to fp32 using NEON integer/bit operations on
// 16-bit lanes (works without the FP16-arithmetic extension). Normalized
// halves are rebuilt by shifting sign-stripped bits into fp32 position and
// rescaling by 2**-112; denormalized halves are rebuilt by placing the
// mantissa into the fraction field of a magic constant and subtracting 0.5f.
//
// n      - byte size of the OUTPUT (fp32) array; must be a multiple of
//          8 * sizeof(float). Processes 8 elements per iteration.
// input  - pointer to n / sizeof(float) fp16 values, read as uint16_t.
// output - pointer to n / sizeof(float) fp32 results.
void xnn_math_f16_f32_cvt__neon_int16(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);

  const uint16x8_t vsign_mask = vmovq_n_u16(0x8000);
  const uint16x8_t vexp_offset = vmovq_n_u16(0x7000);
  const float32x4_t vexp_scale = vmovq_n_f32(0x1.0p-112f);
  const uint32x4_t vmagic_mask = vmovq_n_u32(0x3F000000);
  const float32x4_t vmagic_bias = vmovq_n_f32(0.5f);
  // Largest half-precision bit pattern (sign stripped) treated as denormal.
  const uint16x8_t vdenorm_cutoff = vmovq_n_u16(0x0400);

  const uint16_t* i = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    // Split each half into sign and sign-stripped (exponent+mantissa) parts.
    const uint16x8_t vsign = vandq_u16(vh, vsign_mask);
    const uint16x8_t vnonsign = veorq_u16(vh, vsign);

    // Normalized path: interleave (mantissa << 13) with ((bits >> 3) + exp
    // offset) to form fp32 bit patterns, then rescale by 2**-112 to undo the
    // exponent-bias difference.
    const uint16x8x2_t vprenorm = vzipq_u16(vshlq_n_u16(vnonsign, 13), vsraq_n_u16(vexp_offset, vnonsign, 3));
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[0]), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[1]), vexp_scale);

    // Denormalized path: widen bits into the fraction of 0.5f and subtract
    // 0.5f so only the denormal contribution remains.
    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_mask, vget_low_u16(vnonsign))), vmagic_bias);
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_mask, vget_high_u16(vnonsign))), vmagic_bias);

    // Per-lane select between the two paths, then re-attach the sign bit.
    const uint16x8_t vmask = vcgtq_u16(vnonsign, vdenorm_cutoff);
    const uint32x4_t vmask_lo = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask))));
    const uint32x4_t vmask_hi = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask))));
    const uint32x4_t vf_lo = vorrq_u32(vshll_n_u16(vget_low_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vmask_lo, vnorm_lo, vdenorm_lo)));
    const uint32x4_t vf_hi = vorrq_u32(vshll_n_u16(vget_high_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
}
| 2,299
| 39.350877
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-neon-int32.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Converts IEEE fp16 values to fp32 using NEON integer/bit operations on
// 32-bit lanes (works without the FP16-arithmetic extension). Normalized
// halves are rebuilt by shifting sign-stripped bits into fp32 position and
// rescaling by 2**-112; denormalized halves are rebuilt by inserting the
// mantissa into the fraction field of a magic constant and subtracting 0.5f.
//
// n      - byte size of the OUTPUT (fp32) array; must be a multiple of
//          8 * sizeof(float). Processes 8 elements per iteration.
// input  - pointer to n / sizeof(float) fp16 values, read as uint16_t.
// output - pointer to n / sizeof(float) fp32 results.
void xnn_math_f16_f32_cvt__neon_int32(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);

  const uint32x4_t vsign_mask = vmovq_n_u32(0x80000000);
  const uint32x4_t vexp_offset = vmovq_n_u32(0x70000000);
  const float32x4_t vexp_scale = vmovq_n_f32(0x1.0p-112f);
  const uint32x4_t vmagic_mask = vmovq_n_u32(0x3F000000);
  const float32x4_t vmagic_bias = vmovq_n_f32(0.5f);
  // Half bit patterns at or below this (pre-widening: 0x0400 << 16) are
  // treated as denormal.
  const uint32x4_t vdenorm_cutoff = vmovq_n_u32(0x04000000);

  const uint16_t* i = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    // Widen each 16-bit half into the top half of a 32-bit lane.
    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    // Split into sign and sign-stripped (exponent+mantissa) parts.
    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);
    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    // Normalized path: shift into fp32 position and rescale by 2**-112 to
    // undo the exponent-bias difference.
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    // Denormalized path: insert mantissa bits into the fraction of 0.5f and
    // subtract 0.5f so only the denormal contribution remains.
    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_mask, vnonsign_lo, 16)), vmagic_bias);
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_mask, vnonsign_hi, 16)), vmagic_bias);

    // Per-lane select between the two paths, then re-attach the sign bit.
    const uint32x4_t vmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    const uint32x4_t vmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
    const uint32x4_t vf_lo = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vmask_lo, vnorm_lo, vdenorm_lo)));
    const uint32x4_t vf_hi = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
}
| 2,299
| 38.655172
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-neonfp16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Converts IEEE fp16 values to fp32 using the native NEON-FP16 vcvt
// instruction, 4 elements per iteration.
//
// n      - byte size of the OUTPUT (fp32) array; must be a multiple of
//          4 * sizeof(float).
// input  - pointer to n / sizeof(float) fp16 values, read as uint16_t.
// output - pointer to n / sizeof(float) fp32 results.
void xnn_math_f16_f32_cvt__neonfp16(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  const uint16_t* half_ptr = (const uint16_t*) input;
  size_t remaining = n;
  while (remaining != 0) {
    // Load 4 half-precision values and widen them with a single instruction.
    const float16x4_t vhalf = vreinterpret_f16_u16(vld1_u16(half_ptr));
    const float32x4_t vwide = vcvt_f32_f16(vhalf);
    vst1q_f32(output, vwide);

    half_ptr += 4;
    output += 4;
    remaining -= 4 * sizeof(float);
  }
}
| 670
| 22.137931
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-sse2-int16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <xnnpack/math-stubs.h>
// Converts IEEE fp16 values to fp32 using only SSE2 operations on 16-bit
// lanes, 8 elements per iteration. Normalized halves are rebuilt by shifting
// the sign-stripped bits into fp32 position and rescaling by 2**-112;
// denormalized halves are rebuilt by pairing the bits with the high half of
// 0.5f's bit pattern and subtracting 0.5f.
//
// n      - byte size of the OUTPUT (fp32) array; must be a multiple of
//          8 * sizeof(float).
// input  - pointer to n / sizeof(float) fp16 values, read as uint16_t.
// output - pointer to n / sizeof(float) fp32 results.
void xnn_math_f16_f32_cvt__sse2_int16(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);

  const __m128i vsign_mask = _mm_set1_epi16(0x8000);
  const __m128i vexp_offset = _mm_set1_epi16(0x7000);
  const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
  const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
  const __m128 vmagic_bias = _mm_set1_ps(0.5f);
  const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);

  const uint16_t* src = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m128i vhalf = _mm_loadu_si128((const __m128i*) src);
    src += 8;

    // Strip the sign bit; the remaining 15 bits hold exponent + mantissa.
    const __m128i vsign = _mm_and_si128(vhalf, vsign_mask);
    const __m128i vmagnitude = _mm_xor_si128(vhalf, vsign);

    // Normalized path: interleaving (bits << 13) with ((bits >> 3) + offset)
    // yields fp32 bit patterns; the 2**-112 multiply fixes the exponent bias.
    const __m128i vpre_lo16 = _mm_slli_epi16(vmagnitude, 13);
    const __m128i vpre_hi16 = _mm_add_epi16(_mm_srli_epi16(vmagnitude, 3), vexp_offset);
    const __m128i vnormal_lo = _mm_castps_si128(
      _mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vpre_lo16, vpre_hi16)), vexp_scale));
    const __m128i vnormal_hi = _mm_castps_si128(
      _mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vpre_lo16, vpre_hi16)), vexp_scale));

    // Denormalized path: pairing the bits with 0x3F00 forms 0.5f plus the
    // denormal contribution; subtracting 0.5f leaves just the contribution.
    const __m128i vsubnormal_lo = _mm_castps_si128(
      _mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vmagnitude, vmagic_mask)), vmagic_bias));
    const __m128i vsubnormal_hi = _mm_castps_si128(
      _mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vmagnitude, vmagic_mask)), vmagic_bias));

    // Widen the normal/denormal selection mask to 32-bit lanes, pick the
    // matching path per lane, and re-attach the sign in the top 16 bits.
    const __m128i vselect = _mm_cmpgt_epi16(vmagnitude, vdenorm_cutoff);
    const __m128i vselect_lo = _mm_unpacklo_epi16(vselect, vselect);
    const __m128i vselect_hi = _mm_unpackhi_epi16(vselect, vselect);
    const __m128i vresult_lo = _mm_or_si128(
      _mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vselect_lo, vnormal_lo), _mm_andnot_si128(vselect_lo, vsubnormal_lo)));
    const __m128i vresult_hi = _mm_or_si128(
      _mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vselect_hi, vnormal_hi), _mm_andnot_si128(vselect_hi, vsubnormal_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vresult_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vresult_hi));
    output += 8;
  }
}
| 2,408
| 38.491803
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-sse41-int16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <smmintrin.h>
#include <xnnpack/math-stubs.h>
// Converts IEEE fp16 values to fp32 using SSE4.1 operations on 16-bit lanes,
// 8 elements per iteration. Same algorithm as the SSE2 variant, but the
// per-lane normal/denormal selection uses _mm_blendv_epi8 instead of an
// AND/ANDNOT/OR sequence.
//
// n      - byte size of the OUTPUT (fp32) array; must be a multiple of
//          8 * sizeof(float).
// input  - pointer to n / sizeof(float) fp16 values, read as uint16_t.
// output - pointer to n / sizeof(float) fp32 results.
void xnn_math_f16_f32_cvt__sse41_int16(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);

  const __m128i vsign_mask = _mm_set1_epi16(0x8000);
  const __m128i vexp_offset = _mm_set1_epi16(0x7000);
  const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
  const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
  const __m128 vmagic_bias = _mm_set1_ps(0.5f);
  const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);

  const uint16_t* i = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    // Strip the sign bit; the remaining 15 bits hold exponent + mantissa.
    const __m128i vsign = _mm_and_si128(vh, vsign_mask);
    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    // Normalized path: interleaving (bits << 13) with ((bits >> 3) + offset)
    // yields fp32 bit patterns; the 2**-112 multiply fixes the exponent bias.
    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    // Denormalized path: pairing the bits with 0x3F00 forms 0.5f plus the
    // denormal contribution; subtracting 0.5f leaves just the contribution.
    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    // Blend between the two paths per 32-bit lane (mask widened to 32 bits),
    // then re-attach the sign in the top 16 bits.
    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
}
| 2,243
| 37.689655
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-wasmsimd-int16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/math-stubs.h>
// Converts a buffer of IEEE half-precision (FP16) numbers to single-precision
// (FP32), processing 8 elements per iteration with WAsm SIMD 16-bit integer
// operations.
//
// n      - number of bytes of output to produce; must be a multiple of
//          8 * sizeof(float) (i.e. 8 elements per iteration).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP32 results.
void xnn_math_f16_f32_cvt__wasmsimd_int16(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);
  // Mask for the FP16 sign bit.
  const v128_t vsign_mask = wasm_i16x8_const_splat(0x8000);
  // Pre-shifted exponent adjustment used to rebias FP16 exponents to FP32.
  const v128_t vexp_offset = wasm_i16x8_const_splat(0x7000);
  // Scale factor (2**-112) that completes the exponent rebias for normalized
  // inputs via a floating-point multiply.
  const v128_t vexp_scale = wasm_f32x4_const_splat(0x1.0p-112f);
  // High halfword of the FP32 bit pattern of 0.5f, used on the denormal path.
  const v128_t vmagic_mask = wasm_i16x8_const_splat(0x3F00);
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0.5f);
  // Bit pattern of the smallest normalized FP16 value (2**-14); inputs at or
  // below it are handled on the denormal path.
  const v128_t vdenorm_cutoff = wasm_i16x8_const_splat(0x0400);
  const uint16_t* i = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;
    // Split each FP16 value into its sign and non-sign (exponent + mantissa)
    // parts; XOR with the extracted sign clears the sign bit.
    const v128_t vsign = wasm_v128_and(vh, vsign_mask);
    const v128_t vnonsign = wasm_v128_xor(vh, vsign);
    // Normalized path: build the low and high halfwords of the FP32 bit
    // pattern separately (left shift by 13 for the low bits, right shift by 3
    // plus the exponent offset for the high bits), interleave them with
    // shuffles, and finish the exponent rebias with a multiply by 2**-112.
    const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
    const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
    // Denormal path: place the FP16 bits into the low mantissa bits of the
    // FP32 number 0.5f (yielding 0.5f + bits * 2**-24), then subtract 0.5f to
    // recover the denormal value exactly.
    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
    // Select the normalized result for lanes strictly above the denormal
    // cutoff (both paths agree at the cutoff itself), widening the 16-bit
    // comparison mask to match the 32-bit FP32 lanes.
    const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
    const v128_t vmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
    const v128_t vmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
    // Re-attach the sign bit in the FP32 sign position (shuffling it into the
    // upper halfword of each 32-bit lane).
    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vf_lo = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11),
      wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vmask_lo));
    const v128_t vf_hi = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15),
      wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vmask_hi));
    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
}
| 2,397
| 37.677419
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-f32-cvt-wasmsimd-int32.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/math-stubs.h>
// Converts a buffer of IEEE half-precision (FP16) numbers to single-precision
// (FP32), processing 8 elements per iteration with WAsm SIMD 32-bit integer
// operations (the inputs are widened to 32-bit lanes up front).
//
// n      - number of bytes of output to produce; must be a multiple of
//          8 * sizeof(float) (i.e. 8 elements per iteration).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP32 results.
void xnn_math_f16_f32_cvt__wasmsimd_int32(
    size_t n,
    const void* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);
  // Sign-bit mask in the FP32 position (the FP16 sign bit lands here after
  // the 16-bit widening shuffle below).
  const v128_t vsign_mask = wasm_i32x4_const_splat(0x80000000);
  // Pre-shifted exponent adjustment used to rebias FP16 exponents to FP32.
  const v128_t vexp_offset = wasm_i32x4_const_splat(0x70000000);
  // Scale factor (2**-112) that completes the exponent rebias for normalized
  // inputs via a floating-point multiply.
  const v128_t vexp_scale = wasm_f32x4_const_splat(0x1.0p-112f);
  // FP32 bit pattern of 0.5f, used on the denormal path.
  const v128_t vmagic_mask = wasm_i32x4_const_splat(0x3F000000);
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0.5f);
  // Bit pattern of the smallest normalized FP16 value (2**-14), widened into
  // the upper halfword; inputs at or below it take the denormal path.
  const v128_t vdenorm_cutoff = wasm_i32x4_const_splat(0x04000000);
  const uint16_t* i = (const uint16_t*) input;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;
    // Widen the 8 FP16 values to 32-bit lanes with the FP16 bits in the upper
    // halfword of each lane and zeros in the lower halfword.
    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
    // Split each widened value into its sign and non-sign parts; the sign bit
    // is already in the FP32 sign position, so no later repositioning needed.
    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
    // Normalized path: shift exponent + mantissa into FP32 position, add the
    // exponent offset, then finish the rebias with a multiply by 2**-112.
    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
    // Denormal path: place the FP16 bits into the low mantissa bits of the
    // FP32 number 0.5f (yielding 0.5f + bits * 2**-24), then subtract 0.5f to
    // recover the denormal value exactly.
    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_mask), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_mask), vmagic_bias);
    // Select the normalized result for lanes strictly above the denormal
    // cutoff (both paths agree at the cutoff itself), then OR the sign back.
    const v128_t vmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    const v128_t vmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
    const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vmask_lo));
    const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vmask_hi));
    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
}
| 2,356
| 37.639344
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-aarch64-neonfp16arith-rr1-p2-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AArch64 NEON FP16 arithmetic: single-constant (RR1) range reduction, a
// degree-2 polynomial approximation of exp, and hardware FP16 division.
// Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__aarch64_neonfp16arith_rr1_p2_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9)));  // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC0E)));  // -0x1.038p+0h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    float16x8_t vt = vfmaq_f16(vz, vn, vln2);
    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vdivq_f16(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,410
| 44.474227
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-aarch64-neonfp16arith-rr1-p3-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AArch64 NEON FP16 arithmetic: single-constant (RR1) range reduction, a
// degree-3 polynomial approximation of exp, and hardware FP16 division.
// Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__aarch64_neonfp16arith_rr1_p3_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (-1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB156)));  // -0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    float16x8_t vt = vfmaq_f16(vz, vn, vln2);
    // Compute degree-3 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (-1 + t * (c2 + t * c3)) = -(1 - t * p)
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vone, vp, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * (c2 + t * c3)))
    //     = s * (1 - t * (-p))
    //     = s - (t * s) * (-p)
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmsq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vdivq_f16(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,475
| 44.673469
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-aarch64-neonfp16arith-rr2-p2-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AArch64 NEON FP16 arithmetic: two-constant Cody-Waite (RR2) range
// reduction, a degree-2 polynomial approximation of exp, and hardware FP16
// division. Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__aarch64_neonfp16arith_rr2_p2_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  // High and low parts of log(2) for Cody-Waite range reduction.
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9)));  // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC0E)));  // -0x1.038p+0h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vdivq_f16(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,663
| 45.64
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-aarch64-neonfp16arith-rr2-p3-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AArch64 NEON FP16 arithmetic: two-constant Cody-Waite (RR2) range
// reduction, a degree-3 polynomial approximation of exp, and hardware FP16
// division. Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__aarch64_neonfp16arith_rr2_p3_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  // High and low parts of log(2) for Cody-Waite range reduction.
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (-1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB156)));  // -0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-3 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (-1 + t * (c2 + t * c3)) = -(1 - t * p)
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vone, vp, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * (c2 + t * c3)))
    //     = s * (1 - t * (-p))
    //     = s - (t * s) * (-p)
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmsq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vdivq_f16(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,728
| 45.821782
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-avx2-rr1-p2-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AVX2/FMA with F16C half<->float conversion: computation is done in
// FP32, with single-constant (RR1) range reduction, a degree-2 polynomial
// approximation of exp, and division. Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__avx2_rr1_p2_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Floating-point mask with only the sign bit set
  const __m256 vsign_mask = _mm256_set1_ps(-0.0f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * c2) on [-log(2)/2, log(2)/2]
  const __m256 vc2 = _mm256_set1_ps(0x1.FF3A32p-2f);
  const __m256 vc1 = _mm256_set1_ps(0x1.039E10p+0f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  // The smallest x for which sigmoidh(x) is normalized.
  // This number is also the smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[z] := exp(z) / (1 + exp(z)) where z = -abs(x), then replace result with 1 - f[z] if x >= 0.
    const __m256 vz = _mm256_or_ps(vx, vsign_mask);
    // Compute reduced argument n := round(z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 8.3125] (i.e. z outside [9.703125, 0]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= z <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get the final n := round(z / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
    // Reconstruct the exp(z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s + (t * s) * (c1 + t * c2)
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
    // Denominator of the sigmoid fraction: 1.0 + exp(z)
    const __m256 vd = _mm256_add_ps(ve, vone);
    // Reconstruct sigmoid(z) = exp(z) / (1.0 + exp(z))
    __m256 vf = _mm256_div_ps(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(z) : 1.0 - sigmoid(z)
    // (blendv selects the second operand where the sign bit of vx is set).
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 4,253
| 41.969697
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-avx2-rr1-p2-rcp.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Evaluates the sigmoid function on a buffer of half-precision (FP16) numbers
// using AVX2/FMA with F16C half<->float conversion: computation is done in
// FP32, with single-constant (RR1) range reduction, a degree-2 polynomial
// approximation of exp, and an approximate-reciprocal (RCPPS) division.
// Processes 8 elements per loop iteration.
//
// n      - number of bytes to process; must be a multiple of 8 * sizeof(uint16_t).
// input  - pointer to the FP16 inputs, read as uint16_t values.
// output - pointer to the FP16 outputs, written as uint16_t values.
void xnn_math_f16_sigmoid__avx2_rr1_p2_rcp(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Floating-point mask with only the sign bit set
  const __m256 vsign_mask = _mm256_set1_ps(-0.0f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * c2) on [-log(2)/2, log(2)/2]
  const __m256 vc2 = _mm256_set1_ps(0x1.FF3A32p-2f);
  const __m256 vc1 = _mm256_set1_ps(0x1.039E10p+0f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  // The smallest x for which sigmoidh(x) is normalized.
  // This number is also the smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[z] := exp(z) / (1 + exp(z)) where z = -abs(x), then replace result with 1 - f[z] if x >= 0.
    const __m256 vz = _mm256_or_ps(vx, vsign_mask);
    // Compute reduced argument n := round(z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 8.3125] (i.e. z outside [9.703125, 0]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= z <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get the final n := round(z / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
    // Reconstruct the exp(z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s + (t * s) * (c1 + t * c2)
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
    // Denominator of the sigmoid fraction: 1.0 + exp(z)
    const __m256 vd = _mm256_add_ps(ve, vone);
    // Compute approximate reciprocal of denominator.
    // Note: 1 < d <= 2, because z <= 0.0 and 0 < exp(z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    // RCPPS is approximate (relative error <= 1.5*2**-12), which is adequate
    // for an FP16 (11-bit mantissa) result.
    const __m256 vr = _mm256_rcp_ps(vd);
    // Reconstruct sigmoid(z) = exp(z) / (1.0 + exp(z))
    __m256 vf = _mm256_mul_ps(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(z) : 1.0 - sigmoid(z)
    // (blendv selects the second operand where the sign bit of vx is set).
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 4,478
| 42.067308
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-avx2-rr1-p3-div.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with AVX2 + F16C, 8 elements per iteration.
// Variant: rr1 (range reduction with a single log(2) constant), p3 (degree-3
// polynomial approximation of exp(t)), div (exact division for the final fraction).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__avx2_rr1_p3_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Floating-point mask with only the sign bit set
  const __m256 vsign_mask = _mm256_set1_ps(-0.0f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * c3)) on [-log(2)/2, log(2)/2]
  const __m256 vc3 = _mm256_set1_ps(0x1.5249A6p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.021D60p-1f);
  const __m256 vc1 = _mm256_set1_ps(0x1.000CD6p+0f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  // The smallest x for which sigmoidh(x) is normalized.
  // This number is also the smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[z] := exp(z) / (1 + exp(z)) where z = -abs(x), then replace result with 1 - f[z] if x >= 0.
    const __m256 vz = _mm256_or_ps(vx, vsign_mask);
    // Compute reduced argument n := round(z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 8.3125] (i.e. z outside [9.703125, 0]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= z <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get the final n := round(z / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    // Note: unlike the rr2 variants, log(2) is represented with a single constant here (rr1); fp16 output precision
    // does not require the extra Cody-Waite correction term.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    //   P(t) = 1 + t * (c1 + t * (c2 + t * c3)) = 1 + t * p
    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the exp(z) value:
    //   e = s * (1 + t * (c1 + t * (c2 + t * c3)))
    //     = s + (t * s) * (c1 + t * (c2 + t * c3))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
    // Denominator of the sigmoid fraction: 1.0 + exp(z)
    const __m256 vd = _mm256_add_ps(ve, vone);
    // Reconstruct sigmoid(z) = exp(z) / (1.0 + exp(z)) with an exact division (most accurate, slowest variant).
    __m256 vf = _mm256_div_ps(ve, vd);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(z) : 1.0 - sigmoid(z)
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 4,383
| 42.405941
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-avx2-rr1-p3-rcp.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with AVX2 + F16C, 8 elements per iteration.
// Variant: rr1 (range reduction with a single log(2) constant), p3 (degree-3
// polynomial approximation of exp(t)), rcp (approximate reciprocal for the fraction).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__avx2_rr1_p3_rcp(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Floating-point mask with only the sign bit set
  const __m256 vsign_mask = _mm256_set1_ps(-0.0f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation of
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * c3)) on [-log(2)/2, log(2)/2]
  const __m256 vc3 = _mm256_set1_ps(0x1.5249A6p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.021D60p-1f);
  const __m256 vc1 = _mm256_set1_ps(0x1.000CD6p+0f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  // The smallest x for which sigmoidh(x) is normalized.
  // This number is also the smallest x for which exph(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.368000p+3f);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[z] := exp(z) / (1 + exp(z)) where z = -abs(x), then replace result with 1 - f[z] if x >= 0.
    const __m256 vz = _mm256_or_ps(vx, vsign_mask);
    // Compute reduced argument n := round(z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs x outside
    // of [-9.703125, 8.3125] (i.e. z outside [9.703125, 0]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= z <= 0.0, and -14 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get the final n := round(z / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2).
    // Note: unlike the rr2 variants, log(2) is represented with a single constant here (rr1); fp16 output precision
    // does not require the extra Cody-Waite correction term.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    //   P(t) = 1 + t * (c1 + t * (c2 + t * c3)) = 1 + t * p
    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the exp(z) value:
    //   e = s * (1 + t * (c1 + t * (c2 + t * c3)))
    //     = s + (t * s) * (c1 + t * (c2 + t * c3))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
    // Denominator of the sigmoid fraction: 1.0 + exp(z)
    const __m256 vd = _mm256_add_ps(ve, vone);
    // Compute approximate reciprocal of denominator.
    // Note: 1 < d <= 2, because z <= 0.0 and 0 < exp(z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    const __m256 vr = _mm256_rcp_ps(vd);
    // Reconstruct sigmoid(z) = exp(z) / (1.0 + exp(z))
    __m256 vf = _mm256_mul_ps(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(z) : 1.0 - sigmoid(z)
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
}
| 4,608
| 42.481132
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p2-nr1fma.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with ARM NEON fp16 arithmetic, 8 elements per
// iteration. Variant: rr2 (Cody-Waite range reduction with two log(2) constants),
// p2 (degree-2 polynomial approximation of exp(-t)), nr1fma (one Newton-Raphson
// reciprocal refinement using FMA).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_nr1fma(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9)));  // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC0E)));  // -0x1.038p+0h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx)
;
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of denominator.
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    float16x8_t vr = vrecpeq_f16(vd);
    vr = vfmaq_f16(vr, vr, vfmsq_f16(vone, vr, vd));
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,964
| 45.839623
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p2-nr1recps.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with ARM NEON fp16 arithmetic, 8 elements per
// iteration. Variant: rr2 (Cody-Waite range reduction with two log(2) constants),
// p2 (degree-2 polynomial approximation of exp(-t)), nr1recps (one Newton-Raphson
// reciprocal refinement via the VRECPS step instruction).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_nr1recps(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9)));  // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC0E)));  // -0x1.038p+0h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of denominator.
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    float16x8_t vr = vrecpeq_f16(vd);
    vr = vmulq_f16(vr, vrecpsq_f16(vr, vd));
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,958
| 45.783019
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p2-recpe.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with ARM NEON fp16 arithmetic, 8 elements per
// iteration. Variant: rr2 (Cody-Waite range reduction with two log(2) constants),
// p2 (degree-2 polynomial approximation of exp(-t)), recpe (raw reciprocal estimate,
// no Newton-Raphson refinement - fastest, least accurate reciprocal).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_recpe(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x37F9)));  // 0x1.FE4p-2h
  const float16x8_t vc1 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC0E)));  // -0x1.038p+0h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Compute approximate reciprocal of denominator.
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    const float16x8_t vr = vrecpeq_f16(vd);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,885
| 45.533333
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p3-nr1fma.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with ARM NEON fp16 arithmetic, 8 elements per
// iteration. Variant: rr2 (Cody-Waite range reduction with two log(2) constants),
// p3 (degree-3 polynomial approximation of exp(-t) with implicit c1 == -1),
// nr1fma (one Newton-Raphson reciprocal refinement using FMA).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_nr1fma(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (-1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2] (c1 == -1 is implicit, only c2 and c3 are stored)
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB156)));  // -0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-3 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (-1 + t * (c2 + t * c3)) = -(1 - t * p)
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vone, vp, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * (c2 + t * c3)))
    //     = s * (1 - t * (-p))
    //     = s - (t * s) * (-p)
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmsq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of denominator.
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    float16x8_t vr = vrecpeq_f16(vd);
    vr = vfmaq_f16(vr, vr, vfmsq_f16(vone, vr, vd));
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 5,029
| 46.009346
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p3-nr1recps.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid, vectorized with ARM NEON fp16 arithmetic, 8 elements per
// iteration. Variant: rr2 (Cody-Waite range reduction with two log(2) constants),
// p3 (degree-3 polynomial approximation of exp(-t) with implicit c1 == -1),
// nr1recps (one Newton-Raphson reciprocal refinement via the VRECPS step instruction).
//
// n      - size of input/output in bytes; must be a multiple of 8 fp16 elements.
// input  - IEEE half-precision inputs.
// output - IEEE half-precision sigmoid(input) outputs.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_nr1recps(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of polynomial approximation
  //   exp(-t) ~ 1 + t * (-1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2] (c1 == -1 is implicit, only c2 and c3 are stored)
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB156)));  // -0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which cause rounding of the
    // result to an integer, then subtracing the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-3 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (-1 + t * (c2 + t * c3)) = -(1 - t * p)
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vone, vp, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * (c2 + t * c3)))
    //     = s * (1 - t * (-p))
    //     = s - (t * s) * (-p)
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmsq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Use Newton-Raphson method (1 iteration) to compute reciprocal of denominator.
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    float16x8_t vr = vrecpeq_f16(vd);
    vr = vmulq_f16(vr, vrecpsq_f16(vr, vd));
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 5,023
| 45.953271
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sigmoid-neonfp16arith-rr2-p3-recpe.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Half-precision sigmoid for batches of 8 elements: Cody-Waite range reduction,
// degree-3 polynomial approximation of exp(-t), and a single hardware reciprocal
// estimate (vrecpeq_f16, no Newton-Raphson refinement) for the division.
void xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_recpe(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x660F)));  // 0x1.83Cp+10h
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));  // -0x1.714p+0h
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));  // 0x1.630p-1h
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x8AF4)));  // -0x1.BD0p-13h
  // Coefficients of the degree-3 polynomial approximation
  //   exp(-t) ~ 1 + t * (-1 + t * (c2 + t * c3))
  // on [-log(2)/2, log(2)/2]
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xB156)));  // -0x1.558p-3h
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3808)));  // 0x1.020p-1h
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xC8DA)));  // -0x1.368p+3h
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);
    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The first addition is combined with multiplication
    // by -log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|-x / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable, because inputs outside
    // of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate sigmoidh(x). We fixup the result for
    // such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    // The integer part of n sits in the low mantissa bits after the magic-bias trick; shifting left by 10 moves it
    // into the FP16 exponent field.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);
    // Compute reduced argument t := z - n * log(2). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent -log(2)) to improve accuracy.
    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);
    // Compute degree-3 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (-1 + t * (c2 + t * c3)) = 1 - t * p, where p := 1 - t * (c2 + t * c3)
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vone, vp, vt);
    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * (c2 + t * c3)))
    //     = s * (1 - t * p)
    //     = s - (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmsq_f16(vs, vp, vt);
    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);
    // Compute approximate reciprocal of denominator (single estimate, no refinement iteration).
    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
    // Thus the reciprocal of the denominator never overflows.
    const float16x8_t vr = vrecpeq_f16(vd);
    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vmulq_f16(ve, vr);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
    vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
  }
}
| 4,950
| 45.707547
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sqrt-aarch64-neonfp16arith-sqrt.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Half-precision square root on batches of 8 elements, delegating directly to
// the AArch64 hardware FP16 square-root instruction (VSQRT).
void xnn_math_f16_sqrt__aarch64_neonfp16arith_sqrt(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  const uint16_t* in_ptr = (const uint16_t*) input;
  uint16_t* out_ptr = (uint16_t*) output;
  while (n != 0) {
    // Load 8 half-precision values and reinterpret the raw bits as FP16 lanes.
    const float16x8_t vin = vreinterpretq_f16_u16(vld1q_u16(in_ptr));
    in_ptr += 8;
    // One hardware square-root instruction per vector.
    const float16x8_t vout = vsqrtq_f16(vin);
    vst1q_u16(out_ptr, vreinterpretq_u16_f16(vout));
    out_ptr += 8;
    n -= 8 * sizeof(uint16_t);
  }
}
| 747
| 22.375
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sqrt-neonfp16arith-nr1fma.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Half-precision square root on batches of 8 elements: hardware
// reciprocal-square-root estimate refined by one FMA-based Newton-Raphson step.
void xnn_math_f16_sqrt__neonfp16arith_nr1fma(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // 0.5 in half precision, used by the Newton-Raphson refinement.
  const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800)));  // 0.5h
  const uint16_t* in_ptr = (const uint16_t*) input;
  uint16_t* out_ptr = (uint16_t*) output;
  while (n != 0) {
    const float16x8_t vin = vreinterpretq_f16_u16(vld1q_u16(in_ptr));
    in_ptr += 8;
    // Seed: hardware estimate of 1/sqrt(x); sqrt(x) ~ x * rsqrt(x).
    const float16x8_t vest_rsqrt = vrsqrteq_f16(vin);
    float16x8_t vest_sqrt = vmulq_f16(vest_rsqrt, vin);
    const float16x8_t vhalf_rsqrt = vmulq_f16(vest_rsqrt, vhalf);
    // One Newton-Raphson refinement:
    //   residual = 0.5 - sqrt_est * (0.5 * rsqrt_est)
    //   sqrt_est = sqrt_est + sqrt_est * residual
    const float16x8_t vresidual = vfmsq_f16(vhalf, vest_sqrt, vhalf_rsqrt);
    vest_sqrt = vfmaq_f16(vest_sqrt, vresidual, vest_sqrt);
    vst1q_u16(out_ptr, vreinterpretq_u16_f16(vest_sqrt));
    out_ptr += 8;
    n -= 8 * sizeof(uint16_t);
  }
}
| 1,270
| 27.886364
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sqrt-neonfp16arith-nr1fma1adj.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Half-precision square root on batches of 8 elements: reciprocal-square-root
// estimate, one Newton-Raphson FMA iteration, and one final FMA adjustment.
// Handles special inputs (+/-0, +inf, NaN, negatives) explicitly, and rescales
// the exponent to avoid intermediate underflow.
void xnn_math_f16_sqrt__neonfp16arith_nr1fma1adj(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  // Positive infinity in bit representation.
  const uint16x8_t vpositive_infinity = vmovq_n_u16(UINT16_C(0x7C00));
  // 0.5f constant used in Newton-Raphson iterations.
  const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800)));  // 0.5h
  // Mask for the top 4 exponent bits of a IEEE FP16 number.
  const uint16x8_t vexp4_mask = vmovq_n_u16(UINT16_C(0x7800));
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    // Mask for positive infinity, NaN, and negative inputs (unsigned compare: the sign bit makes
    // negative values exceed 0x7C00). Results for such inputs are replaced with special values,
    // typically with NaN (0x7E00 below is the canonical FP16 quiet NaN).
    uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
    uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
    // Mask for signed zero inputs, both positive and negative. Results for such inputs must be replaced with the input itself.
    const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
    vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
    // Mask for positive infinity inputs. Results for such inputs are replaced with the input itself.
    const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
    const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
    vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
    // Replace the top four bits of exponent with 0b0111 to avoid underflow in computations.
    const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
    // Extract the high 4 bits of inputs's exponent.
    const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
    // Create floating-point scale to apply to the final result to restore the correct exponent.
    // vhsubq halves the exponent difference, matching sqrt's exponent-halving effect.
    const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
    // Initial approximation
    const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
    float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
    const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
    // Newton-Raphson iteration:
    //   residual <- 0.5 - sqrtx * halfrsqrtx
    //   sqrtx <- sqrtx + sqrtx * residual
    const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
    vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
    // Final adjustment:
    //   adjustment <- x - sqrtx * sqrtx
    //   sqrtx <- sqrtx + halfrsqrtx * adjustment
    const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
    vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
    // Apply the exponent adjustment by integer addition of the scale to the result's exponent bits.
    float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
    // Replace results for special inputs (zero, infinity, NaN, negatives) with the precomputed special values.
    vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
}
| 3,568
| 42.52439
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f16-sqrt-neonfp16arith-nr1rsqrts.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Half-precision square root on batches of 8 elements: hardware
// reciprocal-square-root estimate refined with one VRSQRTS Newton-Raphson
// step, then multiplied by x to produce sqrt(x).
void xnn_math_f16_sqrt__neonfp16arith_nr1rsqrts(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);
  const uint16_t* in_ptr = (const uint16_t*) input;
  uint16_t* out_ptr = (uint16_t*) output;
  while (n != 0) {
    const float16x8_t vin = vreinterpretq_f16_u16(vld1q_u16(in_ptr));
    in_ptr += 8;
    // Seed: hardware estimate of 1/sqrt(x).
    float16x8_t vest = vrsqrteq_f16(vin);
    // Newton-Raphson step: est <- est * ((3 - x * est^2) / 2).
    // vrsqrtsq_f16(a, b) computes (3 - a * b) / 2.
    const float16x8_t vest_sq = vmulq_f16(vest, vest);
    const float16x8_t vstep = vrsqrtsq_f16(vin, vest_sq);
    vest = vmulq_f16(vest, vstep);
    // Reconstruct sqrt(x) = rsqrt(x) * x.
    const float16x8_t vout = vmulq_f16(vest, vin);
    vst1q_u16(out_ptr, vreinterpretq_u16_f16(vout));
    out_ptr += 8;
    n -= 8 * sizeof(uint16_t);
  }
}
| 1,091
| 26.3
| 89
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx-rr2-p5.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Single-precision exp(x) for batches of 8 floats using AVX (no FMA, no AVX2
// integer ops): Cody-Waite range reduction, degree-5 polynomial, and a
// two-scale (sn * so) exponent reconstruction done with 128-bit integer halves.
void xnn_math_f32_exp__avx_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p+23f);
  // The smallest x for which expf(x) is non-zero.
  const __m256 vzero_cutoff = _mm256_set1_ps(-0x1.9FE368p+6f);
  // The largest x for which expf(x) is finite.
  const __m256 vinf_cutoff = _mm256_set1_ps(0x1.62E42Ep+6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Last 8 bits are zeroes
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
  const __m256 vplus_inf = _mm256_set1_ps(INFINITY);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  const __m128i vmin_exponent = _mm_set1_epi32(0xC1000000);
  const __m128i vmax_exponent = _mm_set1_epi32(0x3F800000);
  const __m128i vdefault_exponent = vmax_exponent;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the result
    // to an integer, then subtracting the large number back. The trick with adding large number is valid only within
    // certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-103.97207, 88.72283] underflow or
    // overflow expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vx, vlog2e), vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    // AVX1 has no 256-bit integer operations, so the exponent arithmetic is done on the two 128-bit halves.
    __m128i veo_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23);
    __m128i veo_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23);
    // Note: 16-bit min/max are applied to 32-bit exponent words. This is valid because the low 16 bits of veo
    // (n << 23) and of both clamping bounds are all zero, so each 32-bit comparison is decided entirely by the
    // high 16-bit lane, which orders identically under 16-bit and 32-bit signed comparison.
    __m128i ven_lo = _mm_max_epi16(veo_lo, vmin_exponent);
    __m128i ven_hi = _mm_max_epi16(veo_hi, vmin_exponent);
    ven_lo = _mm_min_epi16(ven_lo, vmax_exponent);
    ven_hi = _mm_min_epi16(ven_hi, vmax_exponent);
    veo_lo = _mm_sub_epi32(veo_lo, ven_lo);
    veo_hi = _mm_sub_epi32(veo_hi, ven_hi);
    const __m128 vsn_lo = _mm_castsi128_ps(_mm_add_epi32(ven_lo, vdefault_exponent));
    const __m128 vsn_hi = _mm_castsi128_ps(_mm_add_epi32(ven_hi, vdefault_exponent));
    const __m128 vso_lo = _mm_castsi128_ps(_mm_add_epi32(veo_lo, vdefault_exponent));
    const __m128 vso_hi = _mm_castsi128_ps(_mm_add_epi32(veo_hi, vdefault_exponent));
    const __m256 vsn = _mm256_insertf128_ps(_mm256_castps128_ps256(vsn_lo), vsn_hi, 1);
    const __m256 vso = _mm256_insertf128_ps(_mm256_castps128_ps256(vso_lo), vso_hi, 1);
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = sn * (so + (t * so) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))))
    //     = sn * (so + (t * so) * p)
    vt = _mm256_mul_ps(vt, vso);
    __m256 vf = _mm256_mul_ps(vsn, _mm256_add_ps(_mm256_mul_ps(vt, vp), vso));
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vzero_cutoff, _CMP_LT_OS), vf);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_blendv_ps(vf, vplus_inf, _mm256_cmp_ps(vx, vinf_cutoff, _CMP_GT_OS));
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 5,301
| 48.092593
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx2-rr2-lut8-p3-perm.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Single-precision exp(x) for batches of 8 floats using AVX2/FMA: 8-entry
// in-register lookup table (permute), Cody-Waite range reduction with a
// log(2)/8 step, degree-3 polynomial, and two-scale exponent reconstruction.
void xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(
    size_t n,
    const float* input,
    float* output)
{
  // The loop below consumes 8 floats per iteration, so a multiple of 8 (not 16) is the real precondition.
  assert(n % (8 * sizeof(float)) == 0);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p23f);
  // The smallest x for which expf(x) is non-zero.
  const __m256 vzero_cutoff = _mm256_set1_ps(-0x1.9FE368p6f);
  // The largest x for which expf(x) is finite.
  const __m256 vinf_cutoff = _mm256_set1_ps(0x1.62E42Ep6f);
  const __m256 vlog2e_x8 = _mm256_set1_ps(0x1.715476p3f);
  const __m256 vminus_ln2_o8_hi = _mm256_set1_ps(-0x1.62E43p-4f);
  const __m256 vminus_ln2_o8_lo = _mm256_set1_ps(0x1.05C61p-32f);
  const __m256 vplus_inf = _mm256_set1_ps(INFINITY);
  const __m256 vc2 = _mm256_set1_ps(0x1.00021Ep-1f);
  const __m256 vc3 = _mm256_set1_ps(0x1.55559Ap-3f);
  // Table of 2**(k/8), k = 0..7, indexed by the low 3 bits of n.
  const __m256 vtable = _mm256_set_ps(
    0x1.D5818Ep+0f, 0x1.AE89FAp+0f, 0x1.8ACE54p+0f, 0x1.6A09E6p+0f,
    0x1.4BFDAEp+0f, 0x1.306FE0p+0f, 0x1.172B84p+0f, 0x1.000000p+0f);
  const __m256i vmin_exponent = _mm256_set1_epi32(0xC1000000);
  const __m256i vmax_exponent = _mm256_set1_epi32(0x3F800000);
  const __m256i vdefault_exponent = vmax_exponent;
  const __m256i vmantissa_mask = _mm256_set1_epi32(0x007FFFF8);
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    // Compute reduced argument n := round(x * 8 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but
    // that's ok, because inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup
    // the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e_x8, vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    // The mask drops the low 3 index bits; the shift by 20 (not 23) divides the remaining integer by 8 while
    // moving it into the exponent field.
    __m256i veo = _mm256_slli_epi32(_mm256_and_si256(_mm256_castps_si256(vn), vmantissa_mask), 20);
    __m256i ven = _mm256_max_epi32(veo, vmin_exponent);
    ven = _mm256_min_epi32(ven, vmax_exponent);
    veo = _mm256_sub_epi32(veo, ven);
    const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));
    const __m256 vso = _mm256_castsi256_ps(_mm256_add_epi32(veo, vdefault_exponent));
    // Use the low 3 bits of n (as integer) for table lookup.
    __m256 vl = _mm256_permutevar8x32_ps(vtable, _mm256_castps_si256(vn));
    // Subtract the large number back to get final n := round(x * 8 / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2) / 8.
    // Use Cody-Waite range reduction method (note two constants to represent log(2) / 8) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_o8_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_o8_lo, vt);
    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/16, log(2)/16].
    __m256 vp = _mm256_fmadd_ps(vt, vc3, vc2);
    // Reconstruct the final f value:
    //   f = so * sn * l * (1 + t * (1 + t * (c2 + t * c3)))
    //     = so * sn * (l + l * (t + t * (t * (c2 + t * c3))))
    //     = sn * ((l * so) + (l * so) * p)
    vl = _mm256_mul_ps(vl, vso);
    vp = _mm256_mul_ps(vp, vt);
    vp = _mm256_fmadd_ps(vt, vp, vt);
    __m256 vf = _mm256_fmadd_ps(vl, vp, vl);
    vf = _mm256_mul_ps(vf, vsn);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vzero_cutoff, _CMP_LT_OS), vf);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_blendv_ps(vf, vplus_inf, _mm256_cmp_ps(vx, vinf_cutoff, _CMP_GT_OS));
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,821
| 45.815534
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx2-rr2-lut8-p4-perm.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Single-precision exp(x) for batches of 8 floats using AVX2/FMA: 8-entry
// in-register lookup table (permute), Cody-Waite range reduction with a
// log(2)/8 step, degree-4 polynomial, and two-scale exponent reconstruction.
void xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(
    size_t n,
    const float* input,
    float* output)
{
  // The loop below consumes 8 floats per iteration, so a multiple of 8 (not 16) is the real precondition.
  assert(n % (8 * sizeof(float)) == 0);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p23f);
  // The smallest x for which expf(x) is non-zero.
  const __m256 vzero_cutoff = _mm256_set1_ps(-0x1.9FE368p6f);
  // The largest x for which expf(x) is finite.
  const __m256 vinf_cutoff = _mm256_set1_ps(0x1.62E42Ep6f);
  const __m256 vlog2e_x8 = _mm256_set1_ps(0x1.715476p3f);
  const __m256 vminus_ln2_o8_hi = _mm256_set1_ps(-0x1.62E43p-4f);
  const __m256 vminus_ln2_o8_lo = _mm256_set1_ps(0x1.05C61p-32f);
  const __m256 vplus_inf = _mm256_set1_ps(INFINITY);
  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555C82p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.5558A8p-5f);
  // Table of 2**(k/8), k = 0..7, indexed by the low 3 bits of n.
  const __m256 vtable = _mm256_set_ps(
    0x1.D5818Ep+0f, 0x1.AE89FAp+0f, 0x1.8ACE54p+0f, 0x1.6A09E6p+0f,
    0x1.4BFDAEp+0f, 0x1.306FE0p+0f, 0x1.172B84p+0f, 0x1.000000p+0f);
  const __m256i vmin_exponent = _mm256_set1_epi32(0xC1000000);
  const __m256i vmax_exponent = _mm256_set1_epi32(0x3F800000);
  const __m256i vdefault_exponent = vmax_exponent;
  const __m256i vmantissa_mask = _mm256_set1_epi32(0x007FFFF8);
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    // Compute reduced argument n := round(x * 8 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but
    // that's ok, because inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup
    // the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e_x8, vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    // The mask drops the low 3 index bits; the shift by 20 (not 23) divides the remaining integer by 8 while
    // moving it into the exponent field.
    __m256i veo = _mm256_slli_epi32(_mm256_and_si256(_mm256_castps_si256(vn), vmantissa_mask), 20);
    __m256i ven = _mm256_max_epi32(veo, vmin_exponent);
    ven = _mm256_min_epi32(ven, vmax_exponent);
    veo = _mm256_sub_epi32(veo, ven);
    const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));
    const __m256 vso = _mm256_castsi256_ps(_mm256_add_epi32(veo, vdefault_exponent));
    // Use the low 3 bits of n (as integer) for table lookup.
    __m256 vl = _mm256_permutevar8x32_ps(vtable, _mm256_castps_si256(vn));
    // Subtract the large number back to get final n := round(x * 8 / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2) / 8.
    // Use Cody-Waite range reduction method (note two constants to represent log(2) / 8) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_o8_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_o8_lo, vt);
    // Compute degree-4 polynomial approximation for exp(t) on [-log(2)/16, log(2)/16].
    __m256 vp = _mm256_fmadd_ps(vt, vc4, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    // Reconstruct the final f value:
    //   f = so * sn * l * (1 + t * (1 + t * (c2 + t * (c3 + t * c4))))
    //     = so * sn * (l + l * (t + t * (t * (c2 + t * (c3 + t * c4)))))
    //     = so * sn * (l + l * p)
    vl = _mm256_mul_ps(vl, vso);
    vp = _mm256_mul_ps(vp, vt);
    vp = _mm256_fmadd_ps(vt, vp, vt);
    __m256 vf = _mm256_fmadd_ps(vl, vp, vl);
    vf = _mm256_mul_ps(vf, vsn);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vzero_cutoff, _CMP_LT_OS), vf);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_blendv_ps(vf, vplus_inf, _mm256_cmp_ps(vx, vinf_cutoff, _CMP_GT_OS));
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,947
| 45.242991
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx2-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Single-precision exp(x) for batches of 8 floats using AVX2/FMA: Cody-Waite
// range reduction, degree-5 polynomial, and two-scale exponent reconstruction.
void xnn_math_f32_exp__avx2_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p+23f);
  // The smallest x for which expf(x) is non-zero.
  const __m256 vzero_cutoff = _mm256_set1_ps(-0x1.9FE368p+6f);
  // The largest x for which expf(x) is finite.
  const __m256 vinf_cutoff = _mm256_set1_ps(0x1.62E42Ep+6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  const __m256 vplus_inf = _mm256_set1_ps(INFINITY);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  const __m256i vmin_exponent = _mm256_set1_epi32(0xC1000000);
  const __m256i vmax_exponent = _mm256_set1_epi32(0x3F800000);
  const __m256i vdefault_exponent = vmax_exponent;
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but
    // that's ok, because inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup
    // the result for such inputs at the very end of the algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    __m256i veo = _mm256_slli_epi32(_mm256_castps_si256(vn), 23);
    __m256i ven = _mm256_max_epi32(veo, vmin_exponent);
    ven = _mm256_min_epi32(ven, vmax_exponent);
    veo = _mm256_sub_epi32(veo, ven);
    const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));
    const __m256 vso = _mm256_castsi256_ps(_mm256_add_epi32(veo, vdefault_exponent));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = sn * (so + (t * so) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))))
    //     = sn * (so + (t * so) * p)
    vt = _mm256_mul_ps(vt, vso);
    __m256 vf = _mm256_mul_ps(vsn, _mm256_fmadd_ps(vt, vp, vso));
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vzero_cutoff, _CMP_LT_OS), vf);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_blendv_ps(vf, vplus_inf, _mm256_cmp_ps(vx, vinf_cutoff, _CMP_GT_OS));
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,593
| 45.40404
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-lut16-p3-perm-scalef.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Single-precision exp(x) for batches of 16 floats using AVX-512F: 16-entry
// in-register lookup table (permute), Cody-Waite range reduction, degree-3
// polynomial, and VSCALEF for the final 2**n scaling (which handles
// out-of-range exponents in hardware, so no explicit over/underflow fixup).
void xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62e43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05c61p-29f);
  const __m512 vc2 = _mm512_set1_ps(0x1.00021Ep-1f);
  const __m512 vc3 = _mm512_set1_ps(0x1.55559Ap-3f);
  // Table of 2**(k/16), k = 0..15, indexed by the low 4 bits of n.
  const __m512 vtable = _mm512_set_ps(
    0x1.EA4AFAp+0f, 0x1.D5818Ep+0f, 0x1.C199BEp+0f, 0x1.AE89FAp+0f,
    0x1.9C4918p+0f, 0x1.8ACE54p+0f, 0x1.7A1148p+0f, 0x1.6A09E6p+0f,
    0x1.5AB07Ep+0f, 0x1.4BFDAEp+0f, 0x1.3DEA64p+0f, 0x1.306FE0p+0f,
    0x1.2387A6p+0f, 0x1.172B84p+0f, 0x1.0B5586p+0f, 0x1.000000p+0f);
  for (; n != 0; n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding large number is valid only within certain bounds (|x| <= 2**18), but
    // that's ok, because inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup
    // the result for such inputs at the very end of the algorithm.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Use the low 4 bits of n (as integer) for table lookup.
    const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = _mm512_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/32, log(2)/32].
    //   P = l * (1 + t * (1 + t * (c2 + t * c3)))
    //     = l + l * (t + t * (t * (c2 + t * c3)))
    __m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
    vp = _mm512_mul_ps(vp, vt);
    vp = _mm512_fmadd_ps(vt, vp, vt);
    vp = _mm512_fmadd_ps(vl, vp, vl);
    // Reconstruct the final value as f = exp2(floor(n)) * p.
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_storeu_ps(output, vf);
    input += 16;
    output += 16;
  }
}
| 2,879
| 39
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-lut16-p3-perm.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Vectorized expf() reference kernel (AVX512F, 16 floats per iteration):
// rr2 Cody-Waite range reduction with 4 fractional bits, a 16-entry exp2
// table fetched with VPERMPS, a degree-3 polynomial on the residual, and a
// manually-assembled two-factor scale (sn * so) instead of VSCALEFPS so that
// the [-150, 128] exponent range of n is representable. Underflow is flushed
// to +0.0f and overflow to +inf explicitly via mask registers.
//
// n      - number of input BYTES; must be a multiple of 16 floats (asserted).
// input  - pointer to the input floats (unaligned loads are used).
// output - pointer receiving expf() of each input element.
void xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p23f);
  const __m512 vlog2e_x16 = _mm512_set1_ps(0x1.715476p4f);
  // The smallest x for which expf(x) is non-zero.
  const __m512 vzero_cutoff = _mm512_set1_ps(-0x1.9FE368p6f);
  // The largest x for which expf(x) is finite.
  const __m512 vinf_cutoff = _mm512_set1_ps(0x1.62E42Ep6f);
  // Two-constant (hi/lo) representation of -log(2)/16 for Cody-Waite reduction.
  const __m512 vminus_ln2_o16_hi = _mm512_set1_ps(-0x1.62e43p-5f);
  const __m512 vminus_ln2_o16_lo = _mm512_set1_ps(0x1.05c61p-33f);
  const __m512 vplus_inf = _mm512_set1_ps(INFINITY);
  // Degree-3 polynomial coefficients for exp(t) on [-log(2)/32, log(2)/32].
  const __m512 vc2 = _mm512_set1_ps(0x1.00021Ep-1f);
  const __m512 vc3 = _mm512_set1_ps(0x1.55559Ap-3f);
  // Table of exp2(k/16), k = 0..15, indexed by the low 4 bits of n.
  const __m512 vtable = _mm512_set_ps(
    0x1.EA4AFAp+0f, 0x1.D5818Ep+0f, 0x1.C199BEp+0f, 0x1.AE89FAp+0f,
    0x1.9C4918p+0f, 0x1.8ACE54p+0f, 0x1.7A1148p+0f, 0x1.6A09E6p+0f,
    0x1.5AB07Ep+0f, 0x1.4BFDAEp+0f, 0x1.3DEA64p+0f, 0x1.306FE0p+0f,
    0x1.2387A6p+0f, 0x1.172B84p+0f, 0x1.0B5586p+0f, 0x1.000000p+0f);
  // Clamp bounds for the biased exponent (as raw int32 bit patterns).
  const __m512i vmin_exponent = _mm512_set1_epi32(0xC1000000);
  const __m512i vmax_exponent = _mm512_set1_epi32(0x3F800000);
  const __m512i vdefault_exponent = vmax_exponent;
  // Mantissa bits of the biased n that hold the integer part of n (bits above
  // the 4 table-index bits).
  const __m512i vmantissa_mask = _mm512_set1_epi32(0x007FFFF0);
  for (; n != 0; n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    // Compute reduced argument n := round(x * 16 / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but that's ok, because
    // inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result for such
    // inputs at the very end of the algorithm.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e_x16, vmagic_bias);
    // Detect underflow and overflow of expf(x) for further special handling.
    const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ);
    const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    __m512i veo = _mm512_slli_epi32(_mm512_and_si512(_mm512_castps_si512(vn), vmantissa_mask), 19);
    __m512i ven = _mm512_max_epi32(veo, vmin_exponent);
    ven = _mm512_min_epi32(ven, vmax_exponent);
    veo = _mm512_sub_epi32(veo, ven);
    const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));
    // so is zeroed (via maskz) for lanes that underflow, flushing them to +0.0f later.
    const __m512 vso = _mm512_castsi512_ps(_mm512_maskz_add_epi32(vinvuf, veo, vdefault_exponent));
    // Use the low 4 bits of n (as integer) for table lookup.
    const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
    // Subtract the large number back to get final n := round(x * 16 / log(2)).
    vn = _mm512_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2) / 16.
    // Use Cody-Waite range reduction method (note two constants to represent log(2) / 16) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_o16_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_o16_lo, vt);
    // Compute degree-3 polynomial approximation for exp(t) on [-log(2)/32, log(2)/32].
    __m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
    vp = _mm512_mul_ps(vp, vt);
    vp = _mm512_fmadd_ps(vt, vp, vt);
    // Reconstruct the final f value:
    //   f = so * sn * l * (1 + t * (1 + t * (c2 + t * c3)))
    //     = so * sn * (l + l * (t + t * (t * (c2 + t * c3))))
    //     = so * sn * (l + l * p)
    __m512 vf = _mm512_fmadd_ps(vl, vp, vl);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm512_maskz_mul_ps(vinvuf, vf, vsn);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm512_mask_mul_ps(vplus_inf, vinvof, vso, vf);
    _mm512_storeu_ps(output, vf);
    input += 16;
    output += 16;
  }
}
| 5,096
| 46.635514
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-lut32-p2-perm2-scalef.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(
size_t n,
const float* input,
float* output)
{
assert(n % (16 * sizeof(float)) == 0);
const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p18f);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62e43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05c61p-29f);
const __m512 vc1 = _mm512_set1_ps(0x1.0000F6p-0f);
const __m512 vc2 = _mm512_set1_ps(0x1.000000p-1f);
const __m512 vtable_hi = _mm512_set_ps(
0x1.F50766p+0f, 0x1.EA4AFAp+0f, 0x1.DFC974p+0f, 0x1.D5818Ep+0f,
0x1.CB720Ep+0f, 0x1.C199BEp+0f, 0x1.B7F770p+0f, 0x1.AE89FAp+0f,
0x1.A5503Cp+0f, 0x1.9C4918p+0f, 0x1.93737Cp+0f, 0x1.8ACE54p+0f,
0x1.82589Ap+0f, 0x1.7A1148p+0f, 0x1.71F75Ep+0f, 0x1.6A09E6p+0f);
const __m512 vtable_lo = _mm512_set_ps(
0x1.6247ECp+0f, 0x1.5AB07Ep+0f, 0x1.5342B6p+0f, 0x1.4BFDAEp+0f,
0x1.44E086p+0f, 0x1.3DEA64p+0f, 0x1.371A74p+0f, 0x1.306FE0p+0f,
0x1.29E9E0p+0f, 0x1.2387A6p+0f, 0x1.1D4874p+0f, 0x1.172B84p+0f,
0x1.11301Ep+0f, 0x1.0B5586p+0f, 0x1.059B0Ep+0f, 0x1.000000p+0f);
for (; n != 0; n -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
// Compute reduced argument n := round(x / log(2), 5).
// We do it by adding a large number (magic bias), which cause rounding of result to 5 fractional bits, then
// subtracing the large number back. The first addition is combined with multiplication by log2e into a single FMA
// instruction. The trick with adding large number is valid only within certain bounds (|x| <= 2**17), but that's
// ok, because inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result
// for such inputs at the very end of the algorithm.
__m512 vn = _mm512_fmadd_ps(vx, vlog2e, vmagic_bias);
// Use the low 5 bits of n (as integer) for table lookup.
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
// Subtract the large number back to get final n := round(x / log(2), 5).
vn = _mm512_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-2 polynomial approximation for exp(t) on [-log(2)/64, log(2)/64].
// p = l * (1 + t * (c1 + t * c2))
// = l + l * t * (c1 + t * c2)
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
// Reconstruct the final value as f = exp2(floor(n)) * p.
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
input += 16;
output += 16;
}
}
| 3,150
| 40.460526
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-lut32-p2-perm2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Vectorized expf() reference kernel (AVX512F, 16 floats per iteration):
// rr2 Cody-Waite range reduction with 5 fractional bits, a 32-entry exp2
// table in two registers fetched with VPERMT2PS, a degree-2 polynomial on the
// residual, and a manually-assembled two-factor scale (sn * so) instead of
// VSCALEFPS so the [-150, 128] exponent range of n is representable.
// Underflow is flushed to +0.0f and overflow to +inf via mask registers.
//
// n      - number of input BYTES; must be a multiple of 16 floats (asserted).
// input  - pointer to the input floats (unaligned loads are used).
// output - pointer receiving expf() of each input element.
void xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p23f);
  const __m512 vlog2e_x32 = _mm512_set1_ps(0x1.715476p5f);
  // The smallest x for which expf(x) is non-zero.
  const __m512 vzero_cutoff = _mm512_set1_ps(-0x1.9FE368p6f);
  // The largest x for which expf(x) is finite.
  const __m512 vinf_cutoff = _mm512_set1_ps(0x1.62E42Ep6f);
  // Two-constant (hi/lo) representation of -log(2)/32 for Cody-Waite reduction.
  const __m512 vminus_ln2_o32_hi = _mm512_set1_ps(-0x1.62e43p-6f);
  const __m512 vminus_ln2_o32_lo = _mm512_set1_ps(0x1.05c61p-34f);
  const __m512 vplus_inf = _mm512_set1_ps(INFINITY);
  // Degree-2 polynomial coefficients for exp(t) on [-log(2)/64, log(2)/64].
  const __m512 vc1 = _mm512_set1_ps(0x1.0000F6p-0f);
  const __m512 vc2 = _mm512_set1_ps(0x1.000000p-1f);
  // Table of exp2(k/32), k = 0..31, split across two registers for VPERMT2PS.
  const __m512 vtable_hi = _mm512_set_ps(
    0x1.F50766p+0f, 0x1.EA4AFAp+0f, 0x1.DFC974p+0f, 0x1.D5818Ep+0f,
    0x1.CB720Ep+0f, 0x1.C199BEp+0f, 0x1.B7F770p+0f, 0x1.AE89FAp+0f,
    0x1.A5503Cp+0f, 0x1.9C4918p+0f, 0x1.93737Cp+0f, 0x1.8ACE54p+0f,
    0x1.82589Ap+0f, 0x1.7A1148p+0f, 0x1.71F75Ep+0f, 0x1.6A09E6p+0f);
  const __m512 vtable_lo = _mm512_set_ps(
    0x1.6247ECp+0f, 0x1.5AB07Ep+0f, 0x1.5342B6p+0f, 0x1.4BFDAEp+0f,
    0x1.44E086p+0f, 0x1.3DEA64p+0f, 0x1.371A74p+0f, 0x1.306FE0p+0f,
    0x1.29E9E0p+0f, 0x1.2387A6p+0f, 0x1.1D4874p+0f, 0x1.172B84p+0f,
    0x1.11301Ep+0f, 0x1.0B5586p+0f, 0x1.059B0Ep+0f, 0x1.000000p+0f);
  // Clamp bounds for the biased exponent (as raw int32 bit patterns).
  const __m512i vmin_exponent = _mm512_set1_epi32(0xC1000000);
  const __m512i vmax_exponent = _mm512_set1_epi32(0x3F800000);
  const __m512i vdefault_exponent = vmax_exponent;
  // Mantissa bits of the biased n that hold the integer part of n (bits above
  // the 5 table-index bits).
  const __m512i vmantissa_mask = _mm512_set1_epi32(0x007FFFE0);
  for (; n != 0; n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    // Compute reduced argument n := round(x * 32 / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but that's ok, because
    // inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result for such
    // inputs at the very end of the algorithm.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e_x32, vmagic_bias);
    // Detect underflow and overflow of expf(x) for further special handling.
    const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ);
    const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    __m512i veo = _mm512_slli_epi32(_mm512_and_si512(_mm512_castps_si512(vn), vmantissa_mask), 18);
    __m512i ven = _mm512_max_epi32(veo, vmin_exponent);
    ven = _mm512_min_epi32(ven, vmax_exponent);
    veo = _mm512_sub_epi32(veo, ven);
    const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));
    // so is zeroed (via maskz) for lanes that underflow, flushing them to +0.0f later.
    const __m512 vso = _mm512_castsi512_ps(_mm512_maskz_add_epi32(vinvuf, veo, vdefault_exponent));
    // Use the low 5 bits of n (as integer) for table lookup.
    const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
    // Subtract the large number back to get final n := round(x * 32 / log(2)).
    vn = _mm512_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2) / 32.
    // Use Cody-Waite range reduction method (note two constants to represent log(2) / 32) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_o32_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_o32_lo, vt);
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/64, log(2)/64].
    __m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
    // Reconstruct the final f value:
    //   f = so * sn * l * (1 + t * (c1 + t * c2))
    //     = so * sn * (l + l * t * (c1 + t * c2))
    //     = so * sn * (l + (l * t) * p)
    vt = _mm512_mul_ps(vt, vl);
    __m512 vf = _mm512_fmadd_ps(vt, vp, vl);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm512_maskz_mul_ps(vinvuf, vf, vsn);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm512_mask_mul_ps(vplus_inf, vinvof, vso, vf);
    _mm512_storeu_ps(output, vf);
    input += 16;
    output += 16;
  }
}
| 5,376
| 47.441441
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-p5-scalef.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_exp__avx512f_rr2_p5_scalef(
size_t n,
const float* input,
float* output)
{
assert(n % (16 * sizeof(float)) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
// The smallest x for which expf(x) is non-zero.
const __m512 vzero_cutoff = _mm512_set1_ps(-0x1.9FE368p+6f);
// The largest x for which expf(x) is finite.
const __m512 vinf_cutoff = _mm512_set1_ps(0x1.62E42Ep+6f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
for (; n != 0; n -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
// Compute reduced argument n := round(x / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Detect underflow and overflow of expf(x) for further special handling.
// For large positive or negative inputs the range reduction may produce degenerate reduced arguments:
// - Reduced argument t can fall outside of [-log(2)/2, log(2)/2] range, leading to polynomial approximation p
// being negative, and exp(n) * p being either -0.0f (in underflow case) or -inf (in overflow case) instead of
// +0.0f and +inf respectively.
// - Reduced argument n can overflow and become +inf or -inf, and leading to NaN in reduced argument t.
const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ);
const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ);
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
// Use masking to explicitly zero the result for large positive inputs, to avoid propagating NaN in reduced
// argument t into further computations. Zeroing the reduced argument t would instead result in polynomial
// approximation being 1.0f, which correctly overflows to +inf when scaled by n = +inf.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_maskz_fmadd_ps(vinvof, vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final value as f = exp2(n) * p.
// Use masking to explicitly zero (set to +0.0f) the result for large negative inputs, because for some of these
// inputs the polynomial approximation p is negative and thus exp2(n) * p == -0.0f.
const __m512 vf = _mm512_maskz_scalef_ps(vinvuf, vp, vn);
_mm512_storeu_ps(output, vf);
input += 16;
output += 16;
}
}
| 3,337
| 42.350649
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-avx512f-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Vectorized expf() reference kernel (AVX512F, 16 floats per iteration):
// magic-bias rounding for range reduction, a degree-5 polynomial on
// [-log(2)/2, log(2)/2], and a manually-assembled two-factor scale (sn * so)
// instead of VSCALEFPS so the [-150, 128] exponent range of n is
// representable. Underflow is flushed to +0.0f and overflow to +inf
// explicitly via mask registers.
//
// n      - number of input BYTES; must be a multiple of 16 floats (asserted).
// input  - pointer to the input floats (unaligned loads are used).
// output - pointer receiving expf() of each input element.
void xnn_math_f32_exp__avx512f_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p+23f);
  // The smallest x for which expf(x) is non-zero.
  const __m512 vzero_cutoff = _mm512_set1_ps(-0x1.9FE368p+6f);
  // The largest x for which expf(x) is finite.
  const __m512 vinf_cutoff = _mm512_set1_ps(0x1.62E42Ep+6f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // Two-constant (hi/lo) representation of -log(2) for Cody-Waite reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vplus_inf = _mm512_set1_ps(INFINITY);
  // Degree-5 polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  // Clamp bounds for the biased exponent (as raw int32 bit patterns).
  const __m512i vmin_exponent = _mm512_set1_epi32(0xC1000000);
  const __m512i vmax_exponent = _mm512_set1_epi32(0x3F800000);
  const __m512i vdefault_exponent = vmax_exponent;
  for (; n != 0; n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but that's ok, because
    // inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result for such
    // inputs at the very end of the algorithm.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    __m512i veo = _mm512_slli_epi32(_mm512_castps_si512(vn), 23);
    __m512i ven = _mm512_max_epi32(veo, vmin_exponent);
    ven = _mm512_min_epi32(ven, vmax_exponent);
    veo = _mm512_sub_epi32(veo, ven);
    const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));
    const __m512 vso = _mm512_castsi512_ps(_mm512_add_epi32(veo, vdefault_exponent));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm512_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = sn * (so + (t * so) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))))
    //     = sn * (so + (t * so) * p)
    vt = _mm512_mul_ps(vt, vso);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    __m512 vf = _mm512_maskz_fmadd_ps(_mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_US), vt, vp, vso);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm512_mask_mul_ps(vplus_inf, _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_US), vsn, vf);
    _mm512_storeu_ps(output, vf);
    input += 16;
    output += 16;
  }
}
| 4,574
| 45.683673
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-neonfma-rr2-lut64-p2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values, k = 0..63
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Vectorized expf() reference kernel (NEON+FMA, 4 floats per iteration):
// magic-bias range reduction to multiples of log(2)/64, a 64-entry exp2
// table (xnn_table_exp2_k_over_64, defined elsewhere) gathered via scalar
// lane loads, a degree-2 polynomial on the residual, and a two-factor scale
// where the "overflow" factor is folded into the table value's exponent.
// Underflow is flushed to +0.0f and overflow to +inf explicitly.
//
// n      - number of input BYTES; must be a multiple of 4 floats (asserted).
// input  - pointer to the input floats.
// output - pointer receiving expf() of each input element.
void xnn_math_f32_exp__neonfma_rr2_lut64_p2(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p23f);
  // The smallest x for which expf(x) is non-zero.
  const float32x4_t vzero_cutoff = vmovq_n_f32(-0x1.9FE368p6f);
  // The largest x for which expf(x) is finite.
  const float32x4_t vinf_cutoff = vmovq_n_f32(0x1.62E42Ep6f);
  const float32x4_t vlog2e_x64 = vmovq_n_f32(0x1.715476p6f);
  // Two-constant (hi/lo) representation of -log(2)/64 for Cody-Waite reduction.
  const float32x4_t vminus_ln2_o64_hi = vmovq_n_f32(-0x1.62e43p-7f);
  const float32x4_t vminus_ln2_o64_lo = vmovq_n_f32(0x1.05c61p-35f);
  const float32x4_t vplus_inf = vmovq_n_f32(INFINITY);
  // Degree-2 polynomial coefficient for exp(t) on [-log(2)/128, log(2)/128].
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFF0Ap-2f);
  // Clamp bounds for the biased exponent (as raw int32 bit patterns).
  const int32x4_t vmin_exponent = vmovq_n_s32(INT32_C(0xC1000000));
  const int32x4_t vmax_exponent = vmovq_n_s32(INT32_C(0x3F800000));
  const int32x4_t vdefault_exponent = vmax_exponent;
  // Low 6 bits of n select one of the 64 table entries.
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  for (; n != 0; n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but that's ok, because
    // inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result for such
    // inputs at the very end of the algorithm.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e_x64);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    // While we explicitly compute sn, the so is fused into the value l fetched from a table by adjusting its exponential.
    int32x4_t veo = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    int32x4_t ven = vmaxq_s32(veo, vmin_exponent);
    ven = vminq_s32(ven, vmax_exponent);
    veo = vsubq_s32(veo, ven);
    const float32x4_t vsn = vreinterpretq_f32_s32(vaddq_s32(ven, vdefault_exponent));
    // Use the low 6 bits of n (as integer) for table lookup.
    // Indices are pre-scaled by 4 (element size) and extracted pairwise as 64-bit lanes
    // so each 32-bit half addresses one scalar table load.
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const uint64_t vidx01 = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx, 1);
    float32x2_t vl01 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2_k_over_64 + (uint32_t) vidx01));
    float32x2_t vl23 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2_k_over_64 + (uint32_t) vidx23));
    vl01 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2_k_over_64 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2_k_over_64 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    float32x4_t vl = vcombine_f32(vl01, vl23);
    // Fuse so into the value l fetched from a table by adjusting its exponential.
    vl = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), veo));
    // Subtract the large number back to get final n := round(x * 64 / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note two constants to represent log(2) / 64) to improve accuracy.
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_o64_hi);
    vt = vfmaq_f32(vt, vn, vminus_ln2_o64_lo);
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    // Reconstruct the final f value:
    //   f = sn * (so * l) * (1 + t * (1 + t * c2))
    //     = sn * (so * l) * (1 + t + t * (t * c2))
    //     = sn * ((so * l) + (so * l) * (t + t * (t * c2)))
    float32x4_t vf = vfmaq_f32(vl, vl, vp);
    vf = vmulq_f32(vf, vsn);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vzero_cutoff)));
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vbslq_f32(vcgtq_f32(vx, vinf_cutoff), vplus_inf, vf);
    vst1q_f32(output, vf); output += 4;
  }
}
| 5,500
| 49.935185
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-neonfma-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Vectorized expf() reference kernel (NEON+FMA, 4 floats per iteration):
// magic-bias range reduction, a degree-5 polynomial on [-log(2)/2, log(2)/2],
// and a manually-assembled two-factor scale (sn * so) so the [-150, 128]
// exponent range of n is representable. Underflow is flushed to +0.0f
// (bit-clear) and overflow replaced with +inf (bit-select).
//
// n      - number of input BYTES; must be a multiple of 4 floats (asserted).
// input  - pointer to the input floats.
// output - pointer receiving expf() of each input element.
void xnn_math_f32_exp__neonfma_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p+23f);
  // The smallest x for which expf(x) is non-zero.
  const float32x4_t vzero_cutoff = vmovq_n_f32(-0x1.9FE368p+6f);
  // The largest x for which expf(x) is finite.
  const float32x4_t vinf_cutoff = vmovq_n_f32(0x1.62E42Ep+6f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  // Two-constant (hi/lo) representation of -log(2) for Cody-Waite reduction.
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E43p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.05C61p-29f);
  const float32x4_t vplus_inf = vmovq_n_f32(INFINITY);
  // Degree-5 polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const float32x4_t vc1 = vmovq_n_f32(0x1.FFFFF6p-1f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFDC6p-2f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.555A80p-3f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.573A1Ap-5f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.0F9F9Cp-7f);
  // Clamp bounds for the biased exponent (as raw int32 bit patterns).
  const int32x4_t vmin_exponent = vmovq_n_s32(INT32_C(0xC1000000));
  const int32x4_t vmax_exponent = vmovq_n_s32(INT32_C(0x3F800000));
  const int32x4_t vdefault_exponent = vmax_exponent;
  for (; n != 0; n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which cause rounding of result to an integer, then subtracing the
    // large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding large number is valid only within certain bounds (|x| <= 2**22), but that's ok, because
    // inputs outside of [-103.97207, 88.72283] underflow or overflow expf(x) anyway. We fixup the result for such
    // inputs at the very end of the algorithm.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    int32x4_t veo = vshlq_n_s32(vreinterpretq_s32_f32(vn), 23);
    int32x4_t ven = vmaxq_s32(veo, vmin_exponent);
    ven = vminq_s32(ven, vmax_exponent);
    veo = vsubq_s32(veo, ven);
    const float32x4_t vsn = vreinterpretq_f32_s32(vaddq_s32(ven, vdefault_exponent));
    const float32x4_t vso = vreinterpretq_f32_s32(vaddq_s32(veo, vdefault_exponent));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f32(vt, vn, vminus_ln2_lo);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = sn * (so + (t * so) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))))
    //     = sn * (so + (t * so) * p)
    vt = vmulq_f32(vt, vso);
    float32x4_t vf = vmulq_f32(vsn, vfmaq_f32(vso, vt, vp));
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vzero_cutoff)));
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vbslq_f32(vcgtq_f32(vx, vinf_cutoff), vplus_inf, vf);
    vst1q_f32(output, vf); output += 4;
  }
}
| 4,573
| 46.154639
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-exp-sse2-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) for each element of the input batch (SSE2, 4 floats per iteration).
//
// Algorithm outline:
//   1. n := round(x / log(2)) via the magic-bias addition trick.
//   2. t := x - n * log(2) via Cody-Waite range reduction (two constants for log(2)).
//   3. exp(t) is approximated by a degree-5 polynomial on [-log(2)/2, log(2)/2].
//   4. The 2**n scale is split into two factors (sn, so), because the [-150, 128] range
//      of n exceeds the [-127, 126] range of a normalized single-precision exponent.
void xnn_math_f32_exp__sse2_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 4-element vectors.
  assert(n % (4 * sizeof(float)) == 0);

  // Large number whose addition forces rounding of x / log(2) to an integer.
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p+23f);
  // The smallest x for which expf(x) is non-zero.
  const __m128 vzero_cutoff = _mm_set1_ps(-0x1.9FE368p+6f);
  // The largest x for which expf(x) is finite.
  const __m128 vinf_cutoff = _mm_set1_ps(0x1.62E42Ep+6f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Last 8 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  const __m128 vplus_inf = _mm_set1_ps(INFINITY);
  // Coefficients of the degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
  const __m128 vc1 = _mm_set1_ps(0x1.FFFFF6p-1f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFDC6p-2f);
  const __m128 vc3 = _mm_set1_ps(0x1.555A80p-3f);
  const __m128 vc4 = _mm_set1_ps(0x1.573A1Ap-5f);
  const __m128 vc5 = _mm_set1_ps(0x1.0F9F9Cp-7f);
  // Exponent clamps as raw IEEE-754 bit patterns. The low 16 bits of both constants are
  // zero, which is what makes the 16-bit min/max below equivalent to a 32-bit signed one.
  const __m128i vmin_exponent = _mm_set1_epi32(0xC1000000);
  const __m128i vmax_exponent = _mm_set1_epi32(0x3F800000);
  const __m128i vdefault_exponent = vmax_exponent;
  for (; n != 0; n -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The trick with adding large number is valid only
    // within certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-103.97207, 88.72283] underflow
    // or overflow expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create two floating-point numbers, sn (scale, normal) and so (scale, overflow) such that sn * so == 2**n
    // for inputs which don't cause overflow, i.e. -103.97207 <= x <= 88.72283, and -150 <= n <= 128 accordingly.
    // We need to use two numbers rather than one because a normalized single-precision exponent must be in [-127, 126]
    // range, which is insufficient to cover [-150, 128] range of n.
    // - When n is within [-127, 126], sn == 2**n and so == 1.0.
    // - When n < -127, sn == 2**(-127) and so == 2**(n + 127).
    // - When n > 126, sn == 2**126 and so == 2**(n - 126).
    __m128i veo = _mm_slli_epi32(_mm_castps_si128(vn), 23);
    // SSE2 lacks 32-bit signed min/max; the 16-bit forms are equivalent here because the low
    // 16 bits of veo (n << 23 zeroes bits 0:22) and of both clamp constants are all zero.
    __m128i ven = _mm_max_epi16(veo, vmin_exponent);
    ven = _mm_min_epi16(ven, vmax_exponent);
    veo = _mm_sub_epi32(veo, ven);
    const __m128 vsn = _mm_castsi128_ps(_mm_add_epi32(ven, vdefault_exponent));
    const __m128 vso = _mm_castsi128_ps(_mm_add_epi32(veo, vdefault_exponent));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = so * sn * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = sn * (so + (t * so) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))))
    //     = sn * (so + (t * so) * p)
    vt = _mm_mul_ps(vt, vso);
    __m128 vf = _mm_mul_ps(vsn, _mm_add_ps(_mm_mul_ps(vt, vp), vso));
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vzero_cutoff), vf);
    // For inputs above inf cutoff, replace output with +inf.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    const __m128 vm = _mm_cmpgt_ps(vx, vinf_cutoff);
    vf = _mm_or_ps(_mm_and_ps(vplus_inf, vm), _mm_andnot_ps(vm, vf));
    _mm_storeu_ps(output, vf);
    input += 4;
    output += 4;
  }
}
| 4,573
| 44.287129
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx-rr2-lut4-p4-perm.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX, 8 floats per
// iteration), using a 4-entry exp2 lookup table, Cody-Waite range reduction with two
// constants, and a degree-4 polynomial approximation.
void xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-2) and magic bias === 127 mod 2**20.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Mask for the lowest 2 bits
  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
  // Table of exp2(k / 4) values, k = 0..3. The entries are plain exp2(k / 4) values (in
  // [1.0, 2.0)); the 2**int(n) exponent adjustment is applied by multiplication below.
  const __m256 vtable = _mm256_set_ps(
    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
  // Last 7 bits are zeroes
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/8, log(2)/8]
  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2), 2).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 2 fractional bits, then
    // subtracting the large number back. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 2 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by multiplying it by 2**int(n) (constructed in ven below). The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
    // AVX1 has no 256-bit integer shift, so the shift is performed on the two 128-bit halves separately.
    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
    // Use bits 0:2 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m256 vs = _mm256_mul_ps(vl, _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1));
    // Subtract the large number back to get final n := round(x / log(2), 2).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/8, log(2)/8].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 5,395
| 48.504587
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx-rr2-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX, 8 floats per
// iteration), using Cody-Waite range reduction with two constants and a degree-6
// polynomial approximation (no lookup table).
void xnn_math_f32_expm1minus__avx_rr2_p6(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Last 5 bits are zeroes.
  // Note: vminus_ln2_lo is positive because vminus_ln2_hi slightly overshoots -log(2) in magnitude.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then subtracting
    // the large number back. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    // AVX1 has no 256-bit integer shift, so the shift is performed on the two 128-bit halves separately.
    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,629
| 46.731959
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx2-rr1-lut16-p3-gather.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX2, 8 floats per
// iteration), using a gather from a 16-entry exp2 table, single-constant range
// reduction, and a degree-3 polynomial approximation.
void xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**18,
    // i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19)
    // Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
    const __m256i vl = _mm256_i32gather_epi32((const int*) xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,739
| 45.470588
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx2-rr1-lut4-p4-perm.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX2, 8 floats per
// iteration), using an in-register 4-entry exp2 table (VPERMILPS), single-constant
// range reduction, and a degree-4 polynomial approximation.
void xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-2)
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
  const __m256 vtable = _mm256_set_ps(
    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/8, log(2)/8]
  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the VMAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2), 2).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 2 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**19,
    // i.e. |x| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 2 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -25.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
    // Use bits 0:2 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
    // Subtract the large number back to get final n := round(x / log(2), 2).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/8, log(2)/8].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,795
| 46.019608
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx2-rr1-lut8-p4-perm.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX2, 8 floats per
// iteration), using an in-register 8-entry exp2 table (VPERMD), single-constant range
// reduction, and a degree-4 polynomial approximation.
void xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-3)
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
  const __m256i vtable = _mm256_set_epi32(
    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16]
  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the VMAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**19,
    // i.e. |x| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 3 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -25.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
    // Use bits 0:3 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
    // Subtract the large number back to get final n := round(x / log(2), 3).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,751
| 46.049505
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx2-rr1-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes f(x) := exp(x) - 1 for each element of the input batch (AVX2, 8 floats per
// iteration), using single-constant range reduction and a degree-6 polynomial
// approximation (no lookup table).
void xnn_math_f32_expm1minus__avx2_rr1_p6(
    size_t n,
    const float* input,
    float* output)
{
  // n is the batch size in bytes; it must correspond to a whole number of 8-element vectors.
  assert(n % (8 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Coefficient of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm256_max_ps(vsat_cutoff, vx);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then subtracting
    // the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**22,
    // i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
    vp = _mm256_fmadd_ps(vp, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm256_mul_ps(vt, vs);
    const __m256 vsm1 = _mm256_sub_ps(vs, vone);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 vf = _mm256_add_ps(vp, vsm1);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 4,155
| 44.173913
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx512f-rr1-lut16-p3-perm.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise with AVX512F, using a single-constant
// range reduction (rr1), a 16-entry in-register lookup table accessed via VPERMD,
// and a degree-3 polynomial on the reduced argument.
//
// n      - number of input/output bytes; must be a multiple of 16 floats (64 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
  const __m512i vtable = _mm512_set_epi32(
    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
  const __m512 vone = _mm512_set1_ps(1.0f);

  for (; n != 0; n -= 16 * sizeof(float)) {
    __m512 vx = _mm512_loadu_ps(input);

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm512_max_ps(vsat_cutoff, vx);

    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds (|x / log(2)| <= 2**18,
    // i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));

    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = _mm512_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm512_mul_ps(vt, vs);
    const __m512 vsm1 = _mm512_sub_ps(vs, vone);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    const __m512 vf = _mm512_add_ps(vp, vsm1);

    _mm512_storeu_ps(output, vf);

    input += 16;
    output += 16;
  }
}
| 4,726
| 45.80198
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-avx512f-rr1-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise with AVX512F, using a single-constant
// range reduction (rr1) and a degree-6 polynomial approximation (no lookup table).
//
// n      - number of input/output bytes; must be a multiple of 16 floats (64 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__avx512f_rr1_p6(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (16 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
  const __m512 vone = _mm512_set1_ps(1.0f);

  for (; n != 0; n -= 16 * sizeof(float)) {
    __m512 vx = _mm512_loadu_ps(input);

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm512_max_ps(vsat_cutoff, vx);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m512 vn = _mm512_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm512_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);

    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
    vp = _mm512_fmadd_ps(vp, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm512_mul_ps(vt, vs);
    const __m512 vsm1 = _mm512_sub_ps(vs, vone);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    const __m512 vf = _mm512_add_ps(vp, vsm1);

    _mm512_storeu_ps(output, vf);

    input += 16;
    output += 16;
  }
}
| 4,162
| 44.25
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-neon-rr2-lut16-p3.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes expm1f(x) == exp(x) - 1 elementwise with ARM NEON (no FMA), using
// Cody-Waite two-constant range reduction (rr2), a 16-entry exp2 lookup table in
// memory, and a degree-3 polynomial on the reduced argument.
//
// n      - number of input/output bytes; must be a multiple of 4 floats (16 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__neon_rr2_lut16_p3(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  // -log(2) split into a high part (last 9 bits are zeroes) and a low correction
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(input); input += 4;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f32(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    float32x2_t vl_hi = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ven));

    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f32(vt, vs);
    const float32x4_t vsm1 = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t vf = vaddq_f32(vp, vsm1);

    vst1q_f32(output, vf); output += 4;
  }
}
| 5,367
| 49.168224
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-neon-rr2-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise with ARM NEON (no FMA), using
// Cody-Waite two-constant range reduction (rr2) and a degree-6 polynomial
// approximation (no lookup table).
//
// n      - number of input/output bytes; must be a multiple of 4 floats (16 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__neon_rr2_p6(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  // -log(2) split into a high part (last 5 bits are zeroes) and a low correction
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(input); input += 4;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f32(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
    vp = vmlaq_f32(vc4, vp, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f32(vt, vs);
    const float32x4_t vsm1 = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t vf = vaddq_f32(vp, vsm1);

    vst1q_f32(output, vf); output += 4;
  }
}
| 4,190
| 45.054945
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-neonfma-rr1-lut16-p3.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes expm1f(x) == exp(x) - 1 elementwise with ARM NEON using fused
// multiply-add, a single-constant range reduction (rr1), a 16-entry exp2 lookup
// table in memory, and a degree-3 polynomial on the reduced argument.
//
// n      - number of input/output bytes; must be a multiple of 4 floats (16 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(input); input += 4;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f32(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds (|x / log(2)| <= 2**18,
    // i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    float32x2_t vl_hi = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ven));

    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f32(vt, vs);
    const float32x4_t vsm1 = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t vf = vaddq_f32(vp, vsm1);

    vst1q_f32(output, vf); output += 4;
  }
}
| 5,204
| 49.533981
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-neonfma-rr1-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise with ARM NEON using fused
// multiply-add, a single-constant range reduction (rr1), and a degree-6
// polynomial approximation (no lookup table).
//
// n      - number of input/output bytes; must be a multiple of 4 floats (16 bytes).
// input  - input elements, expected in [-17.328680, 0]; smaller values saturate to
//          -1.0f and NaN inputs are propagated.
// output - destination for the expm1f results.
void xnn_math_f32_expm1minus__neonfma_rr1_p6(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(input); input += 4;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = vmaxq_f32(vx, vsat_cutoff);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then
    // subtracting the large number back. The addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds (|x / log(2)| <= 2**22,
    // i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are restricted to
    // [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
    vp = vfmaq_f32(vc4, vp, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = vmulq_f32(vt, vs);
    const float32x4_t vsm1 = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t vf = vaddq_f32(vp, vsm1);

    vst1q_f32(output, vf); output += 4;
  }
}
| 4,028
| 45.310345
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-lut16-p3.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(
size_t n,
const float* input,
float* output)
{
assert(n % (4 * sizeof(float)) == 0);
// Large number such that ulp(magic bias) == exp2(-4)
const float vmagic_bias = 0x1.800000p19f;
const float vlog2e = 0x1.715476p+0f;
// Mask for the lowest 4 bits
const uint32_t vindex_mask = UINT32_C(0xF);
// The largest x for which expm1f(x) is saturated at -1.0f.
const float vsat_cutoff = -0x1.154246p+4f;
// Last 9 bits are zeroes
const float vminus_ln2_hi = -0x1.62E400p-1f;
const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
// Coefficient of polynomial approximation
// exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
// on [-log(2)/32, log(2)/32]
const float vc3 = 0x1.55561Cp-3f;
const float vc2 = 0x1.0001ECp-1f;
const float vone = 1.0f;
for (; n != 0; n -= sizeof(float)) {
float vx = *input++;
// Compute reduced argument n := round(x / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits, then
// subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
// restricted to [-17.328680, 0].
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float vn = vx * vlog2e + vmagic_bias;
// Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
// has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
// 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
// 2. Adjust fecthed value by addition of int(n) to its floating-point exponent. The result is always a normalized
// number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -25.
//
// Shift bits 4:12 into 23:31 (position of floating-point exponent).
const uint32_t ven = float_as_uint32(vn) << 19;
// Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
// Adjust exponent of the value l fetched from the table to get the final s value.
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
// Subtract the large number back to get final n := round(x / log(2), 4).
vn -= vmagic_bias;
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
float vt = vn * vminus_ln2_hi + vx;
vt = vn * vminus_ln2_lo + vt;
// The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
// To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
// Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
float vp = vc3 * vt + vc2;
vp *= vt;
// Reconstruct the exp(x) - 1 value:
// exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
// = (s - 1) + s * (t + t * p)
// = ((t * s) + (t * s) * p) + (s - 1)
vt *= vs;
const float vsm1 = vs - vone;
vp = vp * vt + vt;
const float vf = vp + vsm1;
*output++ = vf;
}
}
| 4,259
| 41.6
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-lut16-p4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(
size_t n,
const float* input,
float* output)
{
assert(n % (4 * sizeof(float)) == 0);
// Large number such that ulp(magic bias) == exp2(-4)
const float vmagic_bias = 0x1.800000p19f;
const float vlog2e = 0x1.715476p+0f;
// Mask for the lowest 4 bits
const uint32_t vindex_mask = UINT32_C(0xF);
// The largest x for which expm1f(x) is saturated at -1.0f.
const float vsat_cutoff = -0x1.154246p+4f;
// Last 9 bits are zeroes
const float vminus_ln2_hi = -0x1.62E400p-1f;
const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
// Coefficient of polynomial approximation
// exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
// on [-log(2)/32, log(2)/32]
const float vc4 = 0x1.55563Ap-5f;
const float vc3 = 0x1.555708p-3f;
const float vc2 = 0x1.000000p-1f;
const float vone = 1.0f;
for (; n != 0; n -= sizeof(float)) {
float vx = *input++;
// Compute reduced argument n := round(x / log(2), 4).
// We do it by adding a large number (magic bias), which cause rounding of the result to 4 fractional bits, then
// subtracing the large number back. The trick with adding large number is valid only within certain bounds
// (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
// restricted to [-17.328680, 0].
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float vn = vx * vlog2e + vmagic_bias;
// Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
// has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
// 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
// 2. Adjust fecthed value by addition of int(n) to its floating-point exponent. The result is always a normalized
// number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
// lower than -25.
//
// Shift bits 4:12 into 23:31 (position of floating-point exponent).
const uint32_t ven = float_as_uint32(vn) << 19;
// Use bits 0:4 bits of n, as integer, as an index for table lookup of l := 2**frac(n).
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
// Adjust exponent of the value l fetched from the table to get the final s value.
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
// Subtract the large number back to get final n := round(x / log(2), 4).
vn -= vmagic_bias;
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
float vt = vn * vminus_ln2_hi + vx;
vt = vn * vminus_ln2_lo + vt;
// The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
// To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
// Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
// P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
float vp = vc4 * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
// Reconstruct the exp(x) - 1 value:
// exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
// = (s - 1) + s * (t + t * p)
// = ((t * s) + (t * s) * p) + (s - 1)
vt *= vs;
const float vsm1 = vs - vone;
vp = vp * vt + vt;
const float vf = vp + vsm1;
*output++ = vf;
}
}
| 4,363
| 41.784314
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-lut4-p4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 4) values decremented (as integer) by (k << 21), k = 0..3
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_4[4];
// Computes expm1f(x) == exp(x) - 1 elementwise, for inputs restricted to
// [-17.328680, 0], using a 4-entry exp2 lookup table, Cody-Waite range
// reduction, and a degree-4 polynomial approximation.
//
// n is the number of BYTES in the input/output arrays; it must be a multiple
// of 4 * sizeof(float), so n / sizeof(float) elements are processed.
void xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == exp2(-2)
  const float vmagic_bias = 0x1.800000p21f;
  const float vlog2e = 0x1.715476p+0f;
  // Mask for the lowest 2 bits
  const uint32_t vindex_mask = UINT32_C(0x3);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float vsat_cutoff = -0x1.154246p+4f;
  // -log(2) split in two: the last 7 bits of the high part are zeroes, so
  // n * vminus_ln2_hi is exact for the n values produced above.
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/8, log(2)/8]
  const float vc4 = 0x1.554F9Ap-5f;
  const float vc3 = 0x1.557082p-3f;
  const float vc2 = 0x1.000002p-1f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    float vx = *input++;

    // Compute reduced argument n := round(x / log(2), 2).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 2 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**20, i.e. |x| <= 0x1.62E43p+19 = 726817.5), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 2 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 2 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 2:10 into 23:31 (position of floating-point exponent).
    const uint32_t ven = float_as_uint32(vn) << 21;

    // Use bits 0:2 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    float vs = uint32_as_float(xnn_table_exp2minus_k_over_4[vidx] + ven);

    // Subtract the large number back to get the final n := round(x / log(2), 2).
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff, so the
    // reconstruction below yields exactly (0 - 1) == -1.0f.
    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
      vs = 0.0f;
      vt = 0.0f;
    }

    // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/8, log(2)/8].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt *= vs;
    const float vsm1 = vs - vone;
    vp = vp * vt + vt;
    const float vf = vp + vsm1;

    *output++ = vf;
  }
}
| 4,351
| 41.666667
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-lut8-p3.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes expm1f(x) == exp(x) - 1 elementwise, for inputs restricted to
// [-17.328680, 0], using an 8-entry exp2 lookup table, Cody-Waite range
// reduction, and a degree-3 polynomial approximation.
//
// n is the number of BYTES in the input/output arrays; it must be a multiple
// of 4 * sizeof(float), so n / sizeof(float) elements are processed.
void xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p20f;
  const float vlog2e = 0x1.715476p+0f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float vsat_cutoff = -0x1.154246p+4f;
  // -log(2) split in two: the last 8 bits of the high part are zeroes, so
  // n * vminus_ln2_hi is exact for the n values produced above.
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/16, log(2)/16]
  const float vc3 = 0x1.555862p-3f;
  const float vc2 = 0x1.0007ACp-1f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    float vx = *input++;

    // Compute reduced argument n := round(x / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**19, i.e. |x| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 3 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t ven = float_as_uint32(vn) << 20;

    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    float vs = uint32_as_float(xnn_table_exp2minus_k_over_8[vidx] + ven);

    // Subtract the large number back to get the final n := round(x / log(2), 3).
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff, so the
    // reconstruction below yields exactly (0 - 1) == -1.0f.
    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
      vs = 0.0f;
      vt = 0.0f;
    }

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    float vp = vc3 * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt *= vs;
    const float vsm1 = vs - vone;
    vp = vp * vt + vt;
    const float vf = vp + vsm1;

    *output++ = vf;
  }
}
| 4,252
| 41.53
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-lut8-p4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 8) values decremented (as integer) by (k << 20), k = 0..7
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_8[8];
// Computes expm1f(x) == exp(x) - 1 elementwise, for inputs restricted to
// [-17.328680, 0], using an 8-entry exp2 lookup table, Cody-Waite range
// reduction, and a degree-4 polynomial approximation.
//
// n is the number of BYTES in the input/output arrays; it must be a multiple
// of 4 * sizeof(float), so n / sizeof(float) elements are processed.
void xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == exp2(-3)
  const float vmagic_bias = 0x1.800000p20f;
  const float vlog2e = 0x1.715476p+0f;
  // Mask for the lowest 3 bits
  const uint32_t vindex_mask = UINT32_C(0x7);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const float vsat_cutoff = -0x1.154246p+4f;
  // -log(2) split in two: the last 8 bits of the high part are zeroes, so
  // n * vminus_ln2_hi is exact for the n values produced above.
  const float vminus_ln2_hi = -0x1.62E400p-1f;
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * c4)))
  // on [-log(2)/16, log(2)/16]
  const float vc4 = 0x1.5558ECp-5f;
  const float vc3 = 0x1.555C20p-3f;
  const float vc2 = 0x1.000000p-1f;
  const float vone = 1.0f;

  for (; n != 0; n -= sizeof(float)) {
    float vx = *input++;

    // Compute reduced argument n := round(x / log(2), 3).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 3 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**19, i.e. |x| <= 0x1.62E43p+18 = 363408.75), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 3 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 3 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 3:11 into 23:31 (position of floating-point exponent).
    const uint32_t ven = float_as_uint32(vn) << 20;

    // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    float vs = uint32_as_float(xnn_table_exp2minus_k_over_8[vidx] + ven);

    // Subtract the large number back to get the final n := round(x / log(2), 3).
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff, so the
    // reconstruction below yields exactly (0 - 1) == -1.0f.
    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
      vs = 0.0f;
      vt = 0.0f;
    }

    // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
    float vp = vc4 * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * c4)))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt *= vs;
    const float vsm1 = vs - vone;
    vp = vp * vt + vt;
    const float vf = vp + vsm1;

    *output++ = vf;
  }
}
| 4,356
| 41.715686
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-p5.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_expm1minus__scalar_rr2_p5(
size_t n,
const float* input,
float* output)
{
assert(n % (4 * sizeof(float)) == 0);
// Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
const float vmagic_bias = 0x1.8000FEp23f;
const float vlog2e = 0x1.715476p+0f;
// The largest x for which expm1f(x) is saturated at -1.0f.
const float vsat_cutoff = -0x1.154246p+4f;
// Last 5 bits are zeroes
const float vminus_ln2_hi = -0x1.62E440p-1f;
const float vminus_ln2_lo = 0x1.0105C6p-21f;
// Coefficient of polynomial approximation
// exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// on [-log(2)/2, log(2)/2]
const float vc5 = 0x1.113780p-7f;
const float vc4 = 0x1.5704DCp-5f;
const float vc3 = 0x1.555634p-3f;
const float vc2 = 0x1.FFFE70p-2f;
const float vone = 1.0f;
for (; n != 0; n -= sizeof(float)) {
float vx = *input++;
// Compute reduced argument n := round(x / log(2)).
// We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
// the large number back. The trick with adding large number is valid only within certain bounds
// (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
// restricted to [-17.328680, 0].
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float vn = vx * vlog2e + vmagic_bias;
// Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
// -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
float vs = uint32_as_float(float_as_uint32(vn) << 23);
// Subtract the large number back to get final n := round(x / log(2)).
vn -= vmagic_bias;
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
float vt = vn * vminus_ln2_hi + vx;
vt = vn * vminus_ln2_lo + vt;
// The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
// To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
// Compute degree-5 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
// P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = t + t * (t * (c2 + t * (c3 + t * (c4 + t * c5)))) = t + t * p
float vp = vc5 * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
// Reconstruct the exp(x) - 1 value:
// exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))) - 1
// = (s - 1) + s * (t + t * p)
// = ((t * s) + (t * s) * p) + (s - 1)
vt *= vs;
const float vsm1 = vs - vone;
vp = vp * vt + vt;
const float vf = vp + vsm1;
*output++ = vf;
}
}
| 3,339
| 36.954545
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-scalar-rr2-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
void xnn_math_f32_expm1minus__scalar_rr2_p6(
size_t n,
const float* input,
float* output)
{
assert(n % (4 * sizeof(float)) == 0);
// Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
const float vmagic_bias = 0x1.8000FEp23f;
const float vlog2e = 0x1.715476p+0f;
// The largest x for which expm1f(x) is saturated at -1.0f.
const float vsat_cutoff = -0x1.154246p+4f;
// Last 5 bits are zeroes
const float vminus_ln2_hi = -0x1.62E440p-1f;
const float vminus_ln2_lo = 0x1.0105C6p-21f;
// Coefficient of polynomial approximation
// exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// on [-log(2)/2, log(2)/2]
const float vc6 = 0x1.6b7338p-10f;
const float vc5 = 0x1.12278Ep-7f;
const float vc4 = 0x1.555716p-5f;
const float vc3 = 0x1.5554B0p-3f;
const float vc2 = 0x1.FFFFFEp-2f;
const float vone = 1.0f;
for (; n != 0; n -= sizeof(float)) {
float vx = *input++;
// Compute reduced argument n := round(x / log(2)).
// We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
// the large number back. The trick with adding large number is valid only within certain bounds
// (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
// restricted to [-17.328680, 0].
// Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
float vn = vx * vlog2e + vmagic_bias;
// Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
// -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
float vs = uint32_as_float(float_as_uint32(vn) << 23);
// Subtract the large number back to get final n := round(x / log(2)).
vn -= vmagic_bias;
// Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
float vt = vn * vminus_ln2_hi + vx;
vt = vn * vminus_ln2_lo + vt;
// The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
// To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
// Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
// P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
// = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
// Reconstruct the exp(x) - 1 value:
// exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
// = (s - 1) + s * (t + t * p)
// = ((t * s) + (t * s) * p) + (s - 1)
vt *= vs;
const float vsm1 = vs - vone;
vp = vp * vt + vt;
const float vf = vp + vsm1;
*output++ = vf;
}
}
| 3,444
| 37.277778
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-sse2-rr2-lut16-p3.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes expm1f(x) == exp(x) - 1 for 4 elements per iteration with SSE2
// intrinsics, for inputs restricted to [-17.328680, 0], using a 16-entry exp2
// lookup table, Cody-Waite range reduction, and a degree-3 polynomial.
//
// n is the number of BYTES in the input/output arrays; it must be a multiple
// of 4 * sizeof(float).
void xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // The largest x for which expm1f(x) is saturated at -1.0f.
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const __m128i vindex_mask = _mm_set1_epi32(0xF);
  // -log(2) split in two: the last 9 bits of the high part are zeroes, so
  // n * vminus_ln2_hi is exact for the n values produced below.
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
  const __m128 vone = _mm_set1_ps(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    __m128 vx = _mm_loadu_ps(input);

    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. The order of operands in the [V]MAXPS instruction matters: it ensures that NaN
    // inputs are passed unchanged.
    vx = _mm_max_ps(vsat_cutoff, vx);

    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted
    //    exponent is not lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);

    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    // The index is pre-shifted left by 2, i.e. pre-multiplied by the byte stride of a table entry.
    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
#if XNN_ARCH_X86_64
    // Extract all four byte offsets via two 64-bit moves, then gather the table entries with scalar loads.
    const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
    const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
    const __m128i vl0 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
    const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
    const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
    const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
#else
    // On 32-bit x86, extract the four byte offsets lane by lane; the offsets
    // fit in 16 bits, so PEXTRW suffices for lanes 1-3.
    const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx);
    const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx, 2);
    const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx, 4);
    const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx, 6);
    const __m128i vl0 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
    const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
    const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
    const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
#endif
    // Interleave the four scalar loads back into a single vector.
    const __m128i vl = _mm_unpacklo_epi64(_mm_unpacklo_epi32(vl0, vl1), _mm_unpacklo_epi32(vl2, vl3));
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));

    // Subtract the large number back to get the final n := round(x / log(2), 4).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = _mm_mul_ps(vt, vs);
    const __m128 vsm1 = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 vf = _mm_add_ps(vp, vsm1);

    _mm_storeu_ps(output, vf);

    input += 4;
    output += 4;
  }
}
| 6,279
| 50.900826
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-sse2-rr2-p6.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math-stubs.h>
// SSE2 evaluation of expm1f(x) == exp(x) - 1 for inputs restricted to
// [-17.328680, 0], 4 elements per iteration, using Cody-Waite range
// reduction and a degree-6 polynomial (no lookup table).
//
// n is the byte size of the input/output arrays; it must be a multiple of
// 4 * sizeof(float).
void xnn_math_f32_expm1minus__sse2_rr2_p6(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Saturation threshold: expm1f(x) == -1.0f for any x at or below it.
  const __m128 sat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
  // Rounding shifter: ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m128 magic_bias = _mm_set1_ps(0x1.8000FEp23f);
  const __m128 log2e = _mm_set1_ps(0x1.715476p+0f);
  // -log(2) as a high part (last 5 mantissa bits zero) plus a low correction.
  const __m128 minus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
  const __m128 minus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
  // Minimax coefficients of
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2].
  const __m128 c6 = _mm_set1_ps(0x1.6b7338p-10f);
  const __m128 c5 = _mm_set1_ps(0x1.12278Ep-7f);
  const __m128 c4 = _mm_set1_ps(0x1.555716p-5f);
  const __m128 c3 = _mm_set1_ps(0x1.5554B0p-3f);
  const __m128 c2 = _mm_set1_ps(0x1.FFFFFEp-2f);
  const __m128 one = _mm_set1_ps(1.0f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    __m128 x = _mm_loadu_ps(input);
    input += 4;

    // Clamp from below at sat_cutoff; the implementation returns exactly
    // -1.0f there. The operand order of MAXPS passes NaN inputs through
    // unchanged.
    x = _mm_max_ps(sat_cutoff, x);

    // k := round(x / log(2)) via the magic-bias trick; valid because
    // |x / log(2)| <= 2**22 over the clamped input range.
    __m128 k = _mm_add_ps(_mm_mul_ps(x, log2e), magic_bias);

    // scale := 2**k, built by shifting the integer bits of k into the
    // exponent field. For NaN inputs scale has a zero mantissa and is
    // non-NaN, but k and t keep the NaN payload, so it still propagates
    // through the reconstruction to the result.
    const __m128 scale = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(k), 23));

    // Undo the shifter to recover k as a float in [-25, 0].
    k = _mm_sub_ps(k, magic_bias);

    // t := x - k * log(2), two-constant (Cody-Waite) range reduction.
    __m128 t = _mm_add_ps(_mm_mul_ps(k, minus_ln2_hi), x);
    t = _mm_add_ps(_mm_mul_ps(k, minus_ln2_lo), t);

    // p := t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))),
    // so exp(t) - 1 ~ t + t * p.
    __m128 p = _mm_add_ps(_mm_mul_ps(c6, t), c5);
    p = _mm_add_ps(_mm_mul_ps(p, t), c4);
    p = _mm_add_ps(_mm_mul_ps(p, t), c3);
    p = _mm_add_ps(_mm_mul_ps(p, t), c2);
    p = _mm_mul_ps(p, t);

    // exp(x) - 1 = scale * (t + t * p) + (scale - 1)
    //            = ((t * scale) + (t * scale) * p) + (scale - 1)
    t = _mm_mul_ps(t, scale);
    const __m128 scale_m1 = _mm_sub_ps(scale, one);
    p = _mm_add_ps(_mm_mul_ps(p, t), t);

    _mm_storeu_ps(output, _mm_add_ps(p, scale_m1));
    output += 4;
  }
}
| 4,302
| 44.294737
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-wasmsimd-rr2-lut16-p3-andnot.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes expm1f(x) == exp(x) - 1 elementwise, 4 floats per iteration, with WAsm SIMD.
// Variant: 16-entry exp2 lookup table, Cody-Waite range reduction (2 constants), degree-3
// polynomial; saturation at -1 for x <= -17.328680 enforced by andnot-masking s and t.
// n is the number of bytes to process (must be a multiple of 16).
void xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  // Large number such that ulp(magic bias) == exp2(-4)
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p19f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const v128_t vindex_mask = wasm_i32x4_const_splat(0xF);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.154246p+4f);
  // Last 9 bits are zeroes
  const v128_t vminus_ln2_hi = wasm_f32x4_const_splat(-0x1.62E400p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_const_splat(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.55561Cp-3f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.0001ECp-1f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(input);
    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const v128_t ven = wasm_i32x4_shl(vn, 19);
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
    v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
    const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
    const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
    const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    v128_t vs = wasm_i32x4_add(vl, ven);
    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
    const v128_t vm = wasm_f32x4_le(vx, vsat_cutoff);
    vs = wasm_v128_andnot(vs, vm);
    vt = wasm_v128_andnot(vt, vm);
    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = wasm_f32x4_mul(vt, vs);
    const v128_t vsm1 = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t vf = wasm_f32x4_add(vp, vsm1);
    wasm_v128_store(output, vf);
    input += 4;
    output += 4;
  }
}
| 5,511
| 48.214286
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-wasmsimd-rr2-lut16-p3-max.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// Computes expm1f(x) == exp(x) - 1 elementwise, 4 floats per iteration, with WAsm SIMD.
// Variant: 16-entry exp2 lookup table, Cody-Waite range reduction (2 constants), degree-3
// polynomial; saturation at -1 enforced by clamping the input at sat_cutoff with f32x4.max.
// n is the number of bytes to process (must be a multiple of 16).
void xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == exp2(-4)
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.800000p19f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Mask for the lowest 4 bits
  const v128_t vindex_mask = wasm_i32x4_const_splat(0xF);
  // Last 9 bits are zeroes
  const v128_t vminus_ln2_hi = wasm_f32x4_const_splat(-0x1.62E400p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_const_splat(-0x1.7F7D1Cp-20f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * c3))
  // on [-log(2)/32, log(2)/32]
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.55561Cp-3f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.0001ECp-1f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = wasm_f32x4_max(vx, vsat_cutoff);
    // Compute reduced argument n := round(x / log(2), 4).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
    // subtracting the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**18, i.e. |x| <= 0x1.62E43p+17 = 181704.375), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s := 2**n for valid inputs, i.e. -17.328680 <= x <= 0.0. As n
    // has 4 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in two steps:
    // 1. Fetch 2**frac(n) from the table using the 4 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -17.328680 <= x <= 0.0 we have -25 <= int(n) <= 0, and thus the adjusted exponent is not
    //    lower than -25.
    //
    // Shift bits 4:12 into 23:31 (position of floating-point exponent).
    const v128_t ven = wasm_i32x4_shl(vn, 19);
    // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
    v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
    const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
    const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
    const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
    vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const v128_t vs = wasm_i32x4_add(vl, ven);
    // Subtract the large number back to get final n := round(x / log(2), 4).
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
    //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * c3))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = wasm_f32x4_mul(vt, vs);
    const v128_t vsm1 = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t vf = wasm_f32x4_add(vp, vsm1);
    wasm_v128_store(output, vf);
    input += 4;
    output += 4;
  }
}
| 5,512
| 48.666667
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-wasmsimd-rr2-p6-andnot.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise, 4 floats per iteration, with WAsm SIMD.
// Variant: no lookup table — Cody-Waite range reduction (2 constants) plus a degree-6
// polynomial; saturation at -1 for x <= -17.328680 enforced by andnot-masking s and t.
// n is the number of bytes to process (must be a multiple of 16).
void xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp23f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.154246p+4f);
  // Last 5 bits are zeroes
  const v128_t vminus_ln2_hi = wasm_f32x4_const_splat(-0x1.62E440p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_const_splat(0x1.0105C6p-21f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6b7338p-10f);
  const v128_t vc5 = wasm_f32x4_const_splat(0x1.12278Ep-7f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-5f);
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.5554B0p-3f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp-2f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then subtracting
    // the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
    const v128_t vm = wasm_f32x4_le(vx, vsat_cutoff);
    vs = wasm_v128_andnot(vs, vm);
    vt = wasm_v128_andnot(vt, vm);
    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = wasm_f32x4_mul(vt, vs);
    const v128_t vsm1 = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t vf = wasm_f32x4_add(vp, vsm1);
    wasm_v128_store(output, vf);
    input += 4;
    output += 4;
  }
}
| 4,407
| 45.4
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expm1minus-wasmsimd-rr2-p6-max.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/math-stubs.h>
// Computes expm1f(x) == exp(x) - 1 elementwise, 4 floats per iteration, with WAsm SIMD.
// Variant: no lookup table — Cody-Waite range reduction (2 constants) plus a degree-6
// polynomial; saturation at -1 enforced by clamping the input at sat_cutoff with f32x4.max.
// n is the number of bytes to process (must be a multiple of 16).
void xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  // The largest x for which expm1f(x) is saturated at -1.0f.
  const v128_t vsat_cutoff = wasm_f32x4_const_splat(-0x1.154246p+4f);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const v128_t vmagic_bias = wasm_f32x4_const_splat(0x1.8000FEp23f);
  const v128_t vlog2e = wasm_f32x4_const_splat(0x1.715476p+0f);
  // Last 5 bits are zeroes
  const v128_t vminus_ln2_hi = wasm_f32x4_const_splat(-0x1.62E440p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_const_splat(0x1.0105C6p-21f);
  // Coefficients of polynomial approximation
  //   exp(t) - 1 ~ t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
  // on [-log(2)/2, log(2)/2]
  const v128_t vc6 = wasm_f32x4_const_splat(0x1.6b7338p-10f);
  const v128_t vc5 = wasm_f32x4_const_splat(0x1.12278Ep-7f);
  const v128_t vc4 = wasm_f32x4_const_splat(0x1.555716p-5f);
  const v128_t vc3 = wasm_f32x4_const_splat(0x1.5554B0p-3f);
  const v128_t vc2 = wasm_f32x4_const_splat(0x1.FFFFFEp-2f);
  const v128_t vone = wasm_f32x4_const_splat(1.0f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(input);
    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
    // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
    // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
    vx = wasm_f32x4_max(vx, vsat_cutoff);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then subtracting
    // the large number back. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs x are
    // restricted to [-17.328680, 0].
    // Note that addition-subtraction of the large number doesn't cause overflow for inputs in this range.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for valid inputs, i.e.
    // -17.328680 <= x <= 0.0, and -25 <= n <= 0 accordingly.
    // For NaN inputs, s would have zero mantissa and can have arbitrary sign and exponent, depending on the input
    // NaN payload. In these cases, n and t are NaNs with the same payload as input while s is non-NaN, and thus
    // input payload would be propagated in all computations.
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
    //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
    //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);
    // Reconstruct the exp(x) - 1 value:
    //   exp(x) - 1 = s * (1 + t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))) - 1
    //              = (s - 1) + s * (t + t * p)
    //              = ((t * s) + (t * s) * p) + (s - 1)
    vt = wasm_f32x4_mul(vt, vs);
    const v128_t vsm1 = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t vf = wasm_f32x4_add(vp, vsm1);
    wasm_v128_store(output, vf);
    input += 4;
    output += 4;
  }
}
| 4,408
| 45.904255
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-avx2-rr1-p5.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes expf(x) for non-positive x in [-87.336540, 0], 8 floats per iteration,
// with AVX2 + FMA. Range reduction: x = r + k*log(2) with k = round(x/log(2)), so
// exp(x) = 2**k * exp(r); exp(r) is a degree-5 polynomial on [-log(2)/2, log(2)/2].
// Inputs below the denormal cutoff produce +0.0f. n is a byte count (multiple of 32).
void xnn_math_f32_expminus__avx2_rr1_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);

  // Adding this bias forces rounding of x*log2(e) to an integer in the low mantissa
  // bits; the bias is === 127 mod 2**22 so a plain left shift yields the exponent field.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  // Degree-5 minimax coefficients for exp(r) ~ 1 + r*(c1 + r*(c2 + r*(c3 + r*(c4 + r*c5))))
  // on [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  // Below this input, expf(x) is subnormal; such lanes are flushed to +0.0f at the end.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);

  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vin = _mm256_loadu_ps(input);

    // k := round(x / log(2)) via the magic-bias trick, fused with the multiply by
    // log2(e). Valid since |x| <= 87.34 keeps |x/log(2)| far under the 2**22 limit;
    // out-of-range inputs underflow anyway and are fixed up at the end.
    __m256 vround = _mm256_fmadd_ps(vin, vlog2e, vmagic_bias);

    // scale := 2**k, formed by shifting the integer part of vround into the
    // floating-point exponent field. Normalized for -126 <= k <= 0.
    const __m256 vscale = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vround), 23));

    // Remove the magic bias to recover k as a floating-point value.
    vround = _mm256_sub_ps(vround, vmagic_bias);

    // r := x - k * log(2), the reduced argument in [-log(2)/2, log(2)/2].
    __m256 vr = _mm256_fmadd_ps(vround, vminus_ln2, vin);

    // Horner evaluation of p = c1 + r*(c2 + r*(c3 + r*(c4 + r*c5))).
    __m256 vpoly = _mm256_fmadd_ps(vc5, vr, vc4);
    vpoly = _mm256_fmadd_ps(vpoly, vr, vc3);
    vpoly = _mm256_fmadd_ps(vpoly, vr, vc2);
    vpoly = _mm256_fmadd_ps(vpoly, vr, vc1);

    // exp(x) = scale * (1 + r*p) = scale + (r*scale) * p.
    vr = _mm256_mul_ps(vr, vscale);
    __m256 vout = _mm256_fmadd_ps(vr, vpoly, vscale);

    // Flush lanes with x below the denormal cutoff to +0.0f; NaN lanes compare
    // false against the cutoff and therefore pass through unchanged.
    vout = _mm256_andnot_ps(_mm256_cmp_ps(vin, vdenorm_cutoff, _CMP_LT_OS), vout);

    _mm256_storeu_ps(output, vout);

    input += 8;
    output += 8;
  }
}
| 3,468
| 41.82716
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-avx2-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Computes expf(x) for non-positive x in [-87.336540, 0], 8 floats per iteration,
// with AVX2 + FMA. Like the rr1 variant, but uses two-constant Cody-Waite range
// reduction for extra accuracy. Inputs below the denormal cutoff yield +0.0f.
// n is a byte count (multiple of 32).
void xnn_math_f32_expminus__avx2_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (8 * sizeof(float)) == 0);
  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Coefficients of polynomial approximation
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the result
    // to an integer, then subtracting the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs outside
    // of [-87.336540, 0.0] underflow expf(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) = 1 + t * p
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //          = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //          = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    input += 8;
    output += 8;
  }
}
| 3,693
| 42.97619
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-neonfma-rr2-lut2048-p1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 2048) values decremented (as integer) by (k << 12), k = 0..2048
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
// Computes expf(x) for non-positive x in [-87.336544, 0], 4 floats per iteration, with
// NEON + FMA. Uses a 2048-entry exp2 lookup table, two-constant Cody-Waite range
// reduction, and a degree-1 polynomial; inputs below the denormal cutoff yield +0.0f.
// n is a byte count (multiple of 16).
void xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);
  // Large number such that ulp(magic bias) == exp2(-11)
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p12f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p0f);
  // Mask for the lowest 11 bits
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62e43p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.05c61p-29f);
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * c1
  // on [-log(2)/2048, log(2)/2048]
  const float32x4_t vc1 = vmovq_n_f32(0x1.FFFFFEp-1f);
  // The smallest x for which expf(x) is normalized.
  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep6f);
  for (; n != 0; n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    // Compute reduced argument n := round(x / log(2), 11).
    // We do it by adding a large number (magic bias), which causes rounding of the result to 11 fractional bits, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds (|x / log(2)| <= 2**11, i.e.
    // |x| <= 0x1.62E43p+10 = 1419.5654296875), but that is acceptable, because inputs x outside of [-87.336544, 0]
    // underflow expf(x). We fixup the result for such inputs at the very end of the algorithm.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    // Create a floating-point number s (scale) such that s := 2**n for such inputs that expf(x) is normalized, i.e.
    // -87.336544 <= x <= 0. As n has 11 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in
    // two steps:
    // 1. Fetch 2**frac(n) from the table using the 11 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0 (inputs for which expf(x) is normalized) we have -126 <= int(n) <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Shift bits 11:19 into 23:31 (position of floating-point exponent).
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
    // Use bits 0:11 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const uint64_t vidx01 = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx, 1);
    float32x2_t vl01 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) vidx01));
    float32x2_t vl23 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) vidx23));
    vl01 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    const float32x4_t vl = vcombine_f32(vl01, vl23);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    // Subtract the large number back to get final n := round(x / log(2), 11) as a floating-point number.
    vn = vsubq_f32(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2)
    // Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f32(vt, vn, vminus_ln2_lo);
    // Compute degree-1 polynomial approximation for exp(t) on [-log(2)/2048, log(2)/2048].
    //   P(t) = 1 + t * c1 = 1 + p
    const float32x4_t vp = vmulq_f32(vt, vc1);
    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * c1)
    //          = s * (1 + p)
    //          = s + s * p
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
  }
}
| 5,082
| 50.867347
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-neonfma-rr2-lut64-p2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_math_f32_expminus__neonfma_rr2_lut64_p2(
size_t n,
const float* input,
float* output)
{
assert(n % (4 * sizeof(float)) == 0);
// Large number such that ulp(magic bias) == exp2(-6)
const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p17f);
const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p0f);
// Mask for the lowest 6 bits
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62e43p-1f);
const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.05c61p-29f);
// Coefficient of polynomial approximation
// exp(t) ~ 1 + t * (1 + t * c2)
// on [-log(2)/128, log(2)/128]
const float32x4_t vc2 = vmovq_n_f32(0x1.FFFF0Ap-2f);
// The smallest x for which expf(x) is normalized.
const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep6f);
for (; n != 0; n -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
// Compute reduced argument n := round(x / log(2), 6).
// We do it by adding a large number (magic bias), which cause rounding of the result to 6 fractional bits, then
// subtracing the large number back. The first addition is combined with multiplication by log2e into a single FMA
// instruction. The trick with adding large number is valid only within certain bounds (|x / log(2)| <= 2**16, i.e.
// |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs x outside of [-87.336544, 0]
// underflow expf(x). We fixup the result for such inputs at the very end of the algorithm.
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
// Create a floating-point number s (scale) such that s := 2**n for such inputs that expf(x) is normalized, i.e.
// -87.336544 <= x <= 0. As n has 6 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s in
// two steps:
// 1. Fetch 2**frac(n) from the table using the 6 low bits of n, as integer. Note that the fetched values are in
// the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
// 2. Adjust fecthed value by addition of int(n) to its floating-point exponent. The result is always a normalized
// number, because for -87.33642 <= x <= 0 (inputs for which expf(x) is normalized) we have -126 <= int(n) <= 0,
// and thus the adjusted exponent is not lower than -126.
//
// Shift bits 6:14 into 23:31 (position of floating-point exponent).
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
// Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**frac(n).
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const uint64_t vidx01 = vgetq_lane_u64(vidx, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx, 1);
float32x2_t vl01 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx01));
float32x2_t vl23 = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx23));
vl01 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const float32x4_t vl = vcombine_f32(vl01, vl23);
// Adjust exponent of the value l fetched from the table to get the final s value.
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
// Subtract the large number back to get the final n := round(x / log(2), 6) as a floating-point number.
vn = vsubq_f32(vn, vmagic_bias);
// Compute reduced argument t := x - n * log(2)
// Use Cody-Waite range reduction method (note the two constants representing log(2)) to improve accuracy.
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
vt = vfmaq_f32(vt, vn, vminus_ln2_lo);
// Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
// P(t) = 1 + t * (1 + t * c2) = 1 + (t + t * (t * c2)) = 1 + p
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
// Reconstruct the exp(x) value:
// exp(x) = s * (1 + t * (1 + t * c2))
// = s * (1 + p)
// = s + s * p
float32x4_t vf = vfmaq_f32(vs, vs, vp);
// For inputs below denormal cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
}
}
| 5,118
| 50.707071
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-neonfma-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Reference NEON+FMA implementation of expf(x) for non-positive x:
// round-to-nearest range reduction with a two-word (Cody-Waite) log(2)
// and a degree-5 polynomial on [-log(2)/2, log(2)/2].
// Processes 4 floats per iteration; n is a byte count (multiple of 16).
void xnn_math_f32_expminus__neonfma_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  // -log(2) split into a high part and a low correction term for
  // Cody-Waite range reduction.
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E43p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.05C61p-29f);
  // Coefficient of polynomial approximation
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const float32x4_t vc1 = vmovq_n_f32(0x1.FFFFF6p-1f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFDC6p-2f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.555A80p-3f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.573A1Ap-5f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.0F9F9Cp-7f);
  // The smallest x for which expf(x) is normalized.
  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep6f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the result
    // to an integer, then subtracting the large number back. The first addition is combined with multiplication by
    // log2e into a single FMA instruction. The trick with adding large number is valid only within certain bounds
    // (|x / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+21 = 2907270.0), but that is acceptable, because inputs outside
    // of [-87.336540, 0.0] underflow expf(x) anyway. We fixup the result for such inputs at the very end of the
    // algorithm.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly. With the magic bias still present, the integer value of
    // n sits in the low mantissa bits, so shifting it left by 23 places it in the floating-point exponent field.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)) as a floating-point number.
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f32(vt, vn, vminus_ln2_lo);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) = 1 + t * p
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);

    // Reconstruct the exp(x) value:
    //   exp(x) = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //          = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //          = s + (t * s) * p
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
  }
}
| 3,690
| 44.567901
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-scalar-rr2-lut2048-p1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 2048) values decremented (as integer) by (k << 12), k = 0..2048
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_2048[2048];
// Scalar reference implementation of expf(x) for non-positive x, based on a
// 2048-entry table of 2**frac values plus a degree-1 polynomial correction.
// n is a byte count and must be a multiple of sizeof(float).
void xnn_math_f32_expminus__scalar_rr2_lut2048_p1(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Large number such that ulp(magic bias) == exp2(-11): adding it rounds a
  // value to 11 fractional bits.
  const float magic_bias = 0x1.800000p12f;
  const float log2e = 0x1.715476p0f;
  // Mask selecting the lowest 11 bits (the table index).
  const uint32_t index_mask = UINT32_C(0x7FF);
  // -log(2) split into a high part (last 18 bits zero) and a low correction
  // term for Cody-Waite range reduction.
  const float minus_ln2_hi = -0x1.600000p-1f;
  const float minus_ln2_lo = -0x1.7217F8p-8f;
  // Degree-1 polynomial coefficient:
  //   exp(t) ~ 1 + t * c1  on [-log(2)/2048, log(2)/2048]
  const float c1 = 0x1.FFFFFEp-1f;
  // Smallest x for which expf(x) is a normalized number.
  const float denorm_cutoff = -0x1.5D589Ep6f;

  while (n != 0) {
    const float x = *input++;

    // r := round(x / log(2), 11), computed with the magic-bias trick: the big
    // addend forces rounding to 11 fractional bits; it is removed again below.
    // Valid only for |x / log(2)| <= 2**11 (|x| <= 1419.5654296875), which is
    // fine because inputs outside [-87.336544, 0] underflow expf(x) and are
    // fixed up at the end.
    float r = x * log2e + magic_bias;

    // Build the scale s := 2**r by splitting 2**r = 2**int(r) * 2**frac(r):
    // 2**frac(r) is fetched from the table via the low 11 bits of r (table
    // entries lie in [1.0, 2.0), i.e. have a zero exponent field), and int(r)
    // is added into the floating-point exponent by shifting bits 11:19 up to
    // 23:31. For -87.33642 <= x <= 0 we get -126 <= int(r) <= 0, so the
    // adjusted exponent never drops below -126 and s stays normalized.
    const uint32_t exp_adjust = float_as_uint32(r) << 12;
    const uint32_t idx = float_as_uint32(r) & index_mask;
    const float scale = uint32_as_float(xnn_table_exp2minus_k_over_2048[idx] + exp_adjust);

    // Remove the magic bias: r is now round(x / log(2), 11) as a float.
    r -= magic_bias;

    // Reduced argument t := x - r * log(2), using the two-word log(2) for
    // improved accuracy.
    float t = r * minus_ln2_hi + x;
    t = r * minus_ln2_lo + t;

    // Degree-1 polynomial: exp(t) ~ 1 + t * c1 = 1 + p.
    const float p = t * c1;

    // Reconstruct:
    //   exp(x) = scale * (1 + p) = scale + scale * p
    float result = p * scale + scale;

    // Flush results for inputs below the denormal cutoff to +0.0f. NaN inputs
    // fail the comparison and pass through unchanged.
    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      result = 0.0f;
    }
    *output++ = result;

    n -= sizeof(float);
  }
}
| 3,999
| 41.553191
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-scalar-rr2-lut64-p2.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
// Scalar reference implementation of expf(x) for non-positive x, based on a
// 64-entry table of 2**frac values plus a degree-2 polynomial correction.
// n is a byte count and must be a multiple of sizeof(float).
void xnn_math_f32_expminus__scalar_rr2_lut64_p2(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Large number such that ulp(magic bias) == exp2(-6): adding it rounds a
  // value to 6 fractional bits.
  const float magic_bias = 0x1.800000p17f;
  const float log2e = 0x1.715476p0f;
  // Mask selecting the lowest 6 bits (the table index).
  const uint32_t index_mask = UINT32_C(0x3F);
  // -log(2) split into a high part (last 13 bits zero) and a low correction
  // term for Cody-Waite range reduction.
  const float minus_ln2_hi = -0x1.630000p-1f;
  const float minus_ln2_lo = 0x1.BD0106p-13f;
  // Degree-2 polynomial coefficient:
  //   exp(t) ~ 1 + t * (1 + t * c2)  on [-log(2)/128, log(2)/128]
  const float c2 = 0x1.FFFF0Ap-2f;
  // Smallest x for which expf(x) is a normalized number.
  const float denorm_cutoff = -0x1.5D589Ep6f;

  while (n != 0) {
    const float x = *input++;

    // r := round(x / log(2), 6), computed with the magic-bias trick: the big
    // addend forces rounding to 6 fractional bits; it is removed again below.
    // Valid only for |x / log(2)| <= 2**16 (|x| <= 45426.09375), which is fine
    // because inputs outside [-87.336544, 0] underflow expf(x) and are fixed
    // up at the end.
    float r = x * log2e + magic_bias;

    // Build the scale s := 2**r by splitting 2**r = 2**int(r) * 2**frac(r):
    // 2**frac(r) is fetched from the table via the low 6 bits of r (table
    // entries lie in [1.0, 2.0), i.e. have a zero exponent field), and int(r)
    // is added into the floating-point exponent by shifting bits 6:14 up to
    // 23:31. For -87.33642 <= x <= 0 we get -126 <= int(r) <= 0, so the
    // adjusted exponent never drops below -126 and s stays normalized.
    const uint32_t exp_adjust = float_as_uint32(r) << 17;
    const uint32_t idx = float_as_uint32(r) & index_mask;
    const float scale = uint32_as_float(xnn_table_exp2minus_k_over_64[idx] + exp_adjust);

    // Remove the magic bias: r is now round(x / log(2), 6) as a float.
    r -= magic_bias;

    // Reduced argument t := x - r * log(2), using the two-word log(2) for
    // improved accuracy.
    float t = r * minus_ln2_hi + x;
    t = r * minus_ln2_lo + t;

    // Degree-2 polynomial:
    //   exp(t) ~ 1 + t * (1 + t * c2) = 1 + (t + t * (t * c2)) = 1 + p
    float p = t * c2;
    p = p * t + t;

    // Reconstruct:
    //   exp(x) = scale * (1 + p) = scale + scale * p
    float result = p * scale + scale;

    // Flush results for inputs below the denormal cutoff to +0.0f. NaN inputs
    // fail the comparison and pass through unchanged.
    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      result = 0.0f;
    }
    *output++ = result;

    n -= sizeof(float);
  }
}
| 4,033
| 41.463158
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-scalar-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar reference implementation of expf(x) for non-positive x:
// round-to-nearest range reduction with a two-word (Cody-Waite) log(2)
// and a degree-5 polynomial on [-log(2)/2, log(2)/2].
// n is a byte count and must be a multiple of sizeof(float).
void xnn_math_f32_expminus__scalar_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % sizeof(float) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const float magic_bias = 0x1.8000FEp23f;
  const float log2e = 0x1.715476p+0f;
  // -log(2) split into a high part (last 7 bits zero) and a low correction
  // term for Cody-Waite range reduction.
  const float minus_ln2_hi = -0x1.62E400p-1f;
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f;
  // Polynomial coefficients:
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const float c1 = 0x1.FFFFF6p-1f;
  const float c2 = 0x1.FFFDC6p-2f;
  const float c3 = 0x1.555A80p-3f;
  const float c4 = 0x1.573A1Ap-5f;
  const float c5 = 0x1.0F9F9Cp-7f;
  // Smallest x for which expf(x) is a normalized number.
  const float denorm_cutoff = -0x1.5D589Ep6f;

  while (n != 0) {
    const float x = *input++;

    // r := round(x / log(2)), computed with the magic-bias trick: adding the
    // big constant rounds the product to an integer; the constant is removed
    // again below. Valid only for |x / log(2)| <= 2**22, which is fine
    // because inputs outside [-87.336540, 0.0] underflow expf(x) anyway and
    // are fixed up at the end.
    float r = x * log2e + magic_bias;

    // scale := 2**r for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0 and -126 <= r <= 0: with the magic bias still
    // present, the integer value of r sits in the low mantissa bits, so
    // shifting left by 23 places it in the exponent field.
    const float scale = uint32_as_float(float_as_uint32(r) << 23);

    // Remove the magic bias: r is now round(x / log(2)) as a float.
    r -= magic_bias;

    // Reduced argument t := x - r * log(2), using the two-word log(2) for
    // improved accuracy.
    float t = r * minus_ln2_hi + x;
    t = r * minus_ln2_lo + t;

    // Degree-5 polynomial (Horner form):
    //   P(t) = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) = 1 + t * p
    float p = c5 * t + c4;
    p = p * t + c3;
    p = p * t + c2;
    p = p * t + c1;

    // Reconstruct:
    //   exp(x) = scale * (1 + t * p) = scale + (t * scale) * p
    t *= scale;
    float result = t * p + scale;

    // Flush results for inputs below the denormal cutoff to +0.0f. NaN inputs
    // fail the comparison and pass through unchanged.
    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      result = 0.0f;
    }
    *output++ = result;

    n -= sizeof(float);
  }
}
| 3,225
| 37.404762
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-expminus-sse2-rr2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math-stubs.h>
// SSE2 reference implementation of expf(x) for non-positive x:
// round-to-nearest range reduction with a two-word (Cody-Waite) log(2)
// and a degree-5 polynomial on [-log(2)/2, log(2)/2].
// Processes 4 floats per iteration; n is a byte count (multiple of 16).
void xnn_math_f32_expminus__sse2_rr2_p5(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // -log(2) split into a high part (last 7 bits zero) and a low correction
  // term for Cody-Waite range reduction.
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  // Polynomial coefficients:
  //   exp(t) ~ 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const __m128 vc1 = _mm_set1_ps(0x1.FFFFF6p-1f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFDC6p-2f);
  const __m128 vc3 = _mm_set1_ps(0x1.555A80p-3f);
  const __m128 vc4 = _mm_set1_ps(0x1.573A1Ap-5f);
  const __m128 vc5 = _mm_set1_ps(0x1.0F9F9Cp-7f);
  // Smallest x for which expf(x) is a normalized number.
  const __m128 vdenorm_cutoff = _mm_set1_ps(-0x1.5D589Ep6f);

  while (n != 0) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;

    // vr := round(x / log(2)), computed with the magic-bias trick: adding the
    // big constant rounds the product to an integer; the constant is removed
    // again below. Valid only for |x / log(2)| <= 2**22, which is fine
    // because inputs outside [-87.336540, 0.0] underflow expf(x) anyway and
    // are fixed up at the end.
    __m128 vr = _mm_add_ps(_mm_mul_ps(vin, vlog2e), vmagic_bias);

    // vscale := 2**round(x / log(2)) for inputs which don't cause underflow:
    // with the magic bias still present, the integer value of vr sits in the
    // low mantissa bits, so shifting left by 23 places it in the exponent.
    const __m128 vscale = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vr), 23));

    // Remove the magic bias: vr is now round(x / log(2)) as a float.
    vr = _mm_sub_ps(vr, vmagic_bias);

    // Reduced argument t := x - round(x / log(2)) * log(2), using the
    // two-word log(2) for improved accuracy.
    __m128 vred = _mm_add_ps(_mm_mul_ps(vr, vminus_ln2_hi), vin);
    vred = _mm_add_ps(_mm_mul_ps(vr, vminus_ln2_lo), vred);

    // Degree-5 polynomial (Horner form):
    //   P(t) = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) = 1 + t * p
    __m128 vpoly = _mm_add_ps(_mm_mul_ps(vc5, vred), vc4);
    vpoly = _mm_add_ps(_mm_mul_ps(vpoly, vred), vc3);
    vpoly = _mm_add_ps(_mm_mul_ps(vpoly, vred), vc2);
    vpoly = _mm_add_ps(_mm_mul_ps(vpoly, vred), vc1);

    // Reconstruct:
    //   exp(x) = scale * (1 + t * p) = scale + (t * scale) * p
    vred = _mm_mul_ps(vred, vscale);
    __m128 vres = _mm_add_ps(_mm_mul_ps(vred, vpoly), vscale);

    // Zero out results for inputs below the denormal cutoff. NaN inputs fail
    // the comparison and pass through unchanged.
    vres = _mm_andnot_ps(_mm_cmplt_ps(vin, vdenorm_cutoff), vres);

    _mm_storeu_ps(output, vres);
    output += 4;
    n -= 4 * sizeof(float);
  }
}
| 3,617
| 42.071429
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-extexp-avx2-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// AVX2 reference implementation of extended-range exp(x): instead of
// reconstructing exp(x) as a single float (which could overflow/underflow),
// it stores the polynomial value ("mantissa" part, ~exp(t)) and the
// round(x / log(2)) value ("exponent" part) in separate output arrays.
// Processes 8 floats per iteration; n is a byte count (multiple of 32).
void xnn_math_f32_extexp__avx2_p5(
    size_t n,
    const float* input,
    float* output_mantissa,
    float* output_exponent)
{
  assert(n % (8 * sizeof(float)) == 0);

  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // -log(2) split into a high part and a low correction term for
  // Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Polynomial coefficients:
  //   exp(t) ~ c0 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  for (; n != 0; n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);

    // Compute reduced argument n := round(x / log(2)) with an explicit
    // round-to-nearest instruction (no magic-bias trick needed here).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2] (Horner form).
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);

    // Skip reconstruction step and separately store "mantissa" and "exponent" of the output.
    _mm256_storeu_ps(output_mantissa, vp);
    _mm256_storeu_ps(output_exponent, vn);

    input += 8;
    output_mantissa += 8;
    output_exponent += 8;
  }
}
| 1,997
| 32.3
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-extexp-avx512f-p5.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// AVX-512F reference implementation of extended-range exp(x): instead of
// reconstructing exp(x) as a single float (which could overflow/underflow),
// it stores the polynomial value ("mantissa" part, ~exp(t)) and the
// round(x / log(2)) value ("exponent" part) in separate output arrays.
// Processes 16 floats per iteration; n is a byte count (multiple of 64).
void xnn_math_f32_extexp__avx512f_p5(
    size_t n,
    const float* input,
    float* output_mantissa,
    float* output_exponent)
{
  assert(n % (16 * sizeof(float)) == 0);

  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -log(2) split into a high part and a low correction term for
  // Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Polynomial coefficients:
  //   exp(t) ~ c0 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
  // on [-log(2)/2, log(2)/2]
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  for (; n != 0; n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);

    // Compute reduced argument n := round(x / log(2)) with an explicit
    // round-to-nearest-even scale operation (imm 0 = no scaling).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2] (Horner form).
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Skip reconstruction step and separately store "mantissa" and "exponent" of the output.
    _mm512_storeu_ps(output_mantissa, vp);
    _mm512_storeu_ps(output_exponent, vn);

    input += 16;
    output_mantissa += 16;
    output_exponent += 16;
  }
}
| 1,966
| 31.783333
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-f16-cvt-f16c.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/math-stubs.h>
// Converts float32 values to IEEE half-precision (binary16) using the x86
// F16C VCVTPS2PH instruction with round-to-nearest-even.
// Processes 8 elements per iteration; n is a byte count of the half-precision
// output (multiple of 16 bytes, i.e. 8 uint16_t values).
void xnn_math_f32_f16_cvt__f16c(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;

    // Hardware conversion of 8 floats to 8 half-precision values.
    const __m128i vy = _mm256_cvtps_ph(vx, _MM_FROUND_TO_NEAREST_INT);

    _mm_storeu_si128((__m128i*) o, vy);
    o += 8;
  }
}
| 694
| 20.060606
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-f16-cvt-neon.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Converts float32 values to IEEE half-precision (binary16) using only base
// NEON integer/float operations (no FP16 hardware conversion). Rounding is
// realized by scaling the magnitude up (vscale_to_inf) and back down
// (vscale_to_zero) so the float rounding hardware produces the bits needed
// for the half-precision result; the exponent, mantissa and sign fields are
// then assembled with integer masking. NaN inputs map to the canonical
// half-precision NaN 0x7E00 (sign preserved).
// Processes 8 elements per iteration; n is a byte count of the output.
void xnn_math_f32_f16_cvt__neon(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Exponent-bias correction added to |x| bits before masking.
  const uint32x4_t vexp_bias = vdupq_n_u32(UINT32_C(0x07800000));
  const float32x4_t vscale_to_inf = vdupq_n_f32(0x1.0p+112f);
  // All-ones float32 exponent field; |x| above this is Inf/NaN.
  const uint32x4_t vexpw_max = vdupq_n_u32(UINT32_C(0x7F800000));
  const float32x4_t vscale_to_zero = vdupq_n_f32(0x1.0p-110f);
  // Lower clamp for the bias value.
  const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
  // Half-precision field masks: exponent (5 bits), mantissa, sign.
  const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
  const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
  const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
  // Canonical half-precision NaN.
  const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));

  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const float32x4_t vx_lo = vld1q_f32(input); input += 4;
    const float32x4_t vx_hi = vld1q_f32(input); input += 4;

    // Work on magnitudes; the sign is re-attached at the end.
    const float32x4_t vabsx_lo = vabsq_f32(vx_lo);
    const float32x4_t vabsx_hi = vabsq_f32(vx_hi);

    // Bias the binary representation of |x| toward the half-precision exponent range.
    uint32x4_t vbias_lo = vaddq_u32(vreinterpretq_u32_f32(vabsx_lo), vexp_bias);
    uint32x4_t vbias_hi = vaddq_u32(vreinterpretq_u32_f32(vabsx_hi), vexp_bias);
    // Scale up, then back down, to let float hardware perform the rounding.
    float32x4_t vf_lo = vmulq_f32(vabsx_lo, vscale_to_inf);
    float32x4_t vf_hi = vmulq_f32(vabsx_hi, vscale_to_inf);
    // Detect NaN inputs: |x| bits strictly above the all-ones exponent field.
    const uint32x4_t vnanmaskw_lo = vcgtq_u32(vreinterpretq_u32_f32(vabsx_lo), vexpw_max);
    const uint32x4_t vnanmaskw_hi = vcgtq_u32(vreinterpretq_u32_f32(vabsx_hi), vexpw_max);

    // Keep only the exponent field of the bias and clamp it from below.
    vbias_lo = vandq_u32(vbias_lo, vexpw_max);
    vbias_hi = vandq_u32(vbias_hi, vexpw_max);
    vf_lo = vmulq_f32(vf_lo, vscale_to_zero);
    vf_hi = vmulq_f32(vf_hi, vscale_to_zero);
    // Narrow the per-lane NaN masks to 16-bit lanes.
    const uint16x8_t vnanmaskh = vcombine_u16(vmovn_u32(vnanmaskw_lo), vmovn_u32(vnanmaskw_hi));
    vbias_lo = vmaxq_u32(vbias_lo, vbias_min);
    vbias_hi = vmaxq_u32(vbias_hi, vbias_min);

    // Adding the bias as a float places the half-precision result bits into
    // known positions of the float32 representation.
    vf_lo = vaddq_f32(vf_lo, vreinterpretq_f32_u32(vbias_lo));
    vf_hi = vaddq_f32(vf_hi, vreinterpretq_f32_u32(vbias_hi));

    // Extract half-precision exponent (bits 13+), mantissa (low bits), and
    // the original sign (top float32 bit shifted down 16).
    uint16x8_t vexph = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf_lo), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf_hi), 13));
    uint16x8_t vmanth = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf_lo)), vmovn_u32(vreinterpretq_u32_f32(vf_hi)));
    uint16x8_t vsignh = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx_lo), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx_hi), 16));
    vexph = vandq_u16(vexph, vexph_mask);
    vmanth = vandq_u16(vmanth, vmanth_mask);
    vsignh = vandq_u16(vsignh, vsignh_mask);

    // Assemble the result; NaN lanes are overwritten with the canonical NaN,
    // then the sign bit is OR-ed back in.
    uint16x8_t vh = vaddq_u16(vmanth, vexph);
    vh = vbslq_u16(vnanmaskh, vnanh, vh);
    vh = vorrq_u16(vh, vsignh);

    vst1q_u16(o, vh); o += 8;
  }
}
| 2,943
| 38.253333
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-f16-cvt-neonfp16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math-stubs.h>
// Converts float32 values to IEEE half-precision (binary16) using the NEON
// FP16 hardware conversion instruction (VCVT.F16.F32).
// Processes 4 elements per iteration; n is a byte count of the half-precision
// output (multiple of 8 bytes, i.e. 4 uint16_t values).
void xnn_math_f32_f16_cvt__neonfp16(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (4 * sizeof(uint16_t)) == 0);

  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 4 * sizeof(uint16_t)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    // Hardware conversion of 4 floats to 4 half-precision values, stored as
    // raw uint16_t bit patterns.
    const uint16x4_t vy = vreinterpret_u16_f16(vcvt_f16_f32(vx));

    vst1_u16(o, vy); o += 4;
  }
}
| 662
| 21.862069
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-f16-cvt-scalar-bitcast.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar float32 -> IEEE half-precision (binary16) conversion that isolates
// the sign via bit masking (no fabsf call). Rounding is performed by scaling
// the magnitude up and back down so the float rounding hardware produces the
// half-precision bits, which are then extracted with integer masks. NaN
// inputs map to the canonical half NaN 0x7E00 with the sign preserved.
// n is a byte count of the output (multiple of sizeof(uint16_t)).
void xnn_math_f32_f16_cvt__scalar_bitcast(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (sizeof(uint16_t)) == 0);

  // Mask clearing the float32 sign bit.
  const uint32_t nonsign_mask = UINT32_C(0x7FFFFFFF);
  // Exponent-bias correction added to |x| bits before masking.
  const uint32_t exp_bias = UINT32_C(0x07800000);
  const float scale_to_inf = 0x1.0p+112f;
  // All-ones float32 exponent field; |x| bits above this mean NaN.
  const uint32_t expw_max = UINT32_C(0x7F800000);
  const float scale_to_zero = 0x1.0p-110f;
  // Lower clamp for the bias value.
  const uint32_t bias_min = UINT32_C(0x40000000);
  // Half-precision exponent and mantissa field masks, and the canonical NaN.
  const uint16_t exph_mask = UINT16_C(0x7C00);
  const uint16_t manth_mask = UINT16_C(0x0FFF);
  const uint16_t nanh = UINT16_C(0x7E00);

  uint16_t* o = (uint16_t*) output;
  while (n != 0) {
    const float x = *input++;

    // Split the input bits into magnitude and sign.
    const uint32_t w = float_as_uint32(x);
    const uint32_t abs_w = w & nonsign_mask;
    float f = uint32_as_float(abs_w);
    const uint32_t sign_w = w ^ abs_w;

    // Bias the magnitude bits toward the half-precision exponent range,
    // keep only the exponent field, and clamp it from below.
    uint32_t bias = abs_w + exp_bias;
    f *= scale_to_inf;
    bias &= expw_max;
    f *= scale_to_zero;
    bias = math_max_u32(bias, bias_min);

    // Adding the bias as a float places the half-precision result bits into
    // known positions of the float32 representation.
    f += uint32_as_float(bias);
    const uint32_t result_w = float_as_uint32(f);

    // Extract the half-precision exponent, mantissa, and original sign.
    const uint16_t exp_h = (uint16_t) (result_w >> 13) & exph_mask;
    const uint16_t mant_h = (uint16_t) result_w & manth_mask;
    const uint16_t sign_h = (uint16_t) (sign_w >> 16);

    // Assemble the result; NaN inputs get the canonical half NaN, then the
    // sign bit is OR-ed back in.
    uint16_t h = exp_h + mant_h;
    if XNN_UNPREDICTABLE(abs_w > expw_max) {
      h = nanh;
    }
    h |= sign_h;
    *o++ = h;

    n -= sizeof(uint16_t);
  }
}
| 1,755
| 25.606061
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/math/f32-f16-cvt-scalar-fabsf.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>
// Scalar float32 -> IEEE half-precision (binary16) conversion that isolates
// the magnitude via fabsf. Rounding is performed by scaling the magnitude up
// and back down so the float rounding hardware produces the half-precision
// bits, which are then extracted with integer masks. NaN inputs map to the
// canonical half NaN 0x7E00 with the sign preserved.
// n is a byte count of the output (multiple of sizeof(uint16_t)).
void xnn_math_f32_f16_cvt__scalar_fabsf(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (sizeof(uint16_t)) == 0);

  const float scale_to_inf = 0x1.0p+112f;
  // Exponent-bias correction added to |x| bits before masking.
  const uint32_t exp_bias = UINT32_C(0x07800000);
  const float scale_to_zero = 0x1.0p-110f;
  // All-ones float32 exponent field; |x| bits above this mean NaN.
  const uint32_t expw_max = UINT32_C(0x7F800000);
  // Lower clamp for the bias value.
  const uint32_t bias_min = UINT32_C(0x40000000);
  // Half-precision exponent and mantissa field masks, and the canonical NaN.
  const uint16_t exph_mask = UINT16_C(0x7C00);
  const uint16_t manth_mask = UINT16_C(0x0FFF);
  const uint16_t nanh = UINT16_C(0x7E00);

  uint16_t* o = (uint16_t*) output;
  while (n != 0) {
    const float x = *input++;

    // Split the input into magnitude and sign.
    const float abs_x = fabsf(x);
    uint32_t sign_w = float_as_uint32(x);
    const uint32_t abs_w = float_as_uint32(abs_x);

    // Scale up, then bias the magnitude bits toward the half-precision
    // exponent range; isolate the sign bit by XOR-ing out the magnitude.
    float f = abs_x * scale_to_inf;
    uint32_t bias = abs_w + exp_bias;
    sign_w ^= abs_w;
    f *= scale_to_zero;
    bias &= expw_max;
    bias = math_max_u32(bias, bias_min);

    // Adding the bias as a float places the half-precision result bits into
    // known positions of the float32 representation.
    f += uint32_as_float(bias);
    const uint32_t result_w = float_as_uint32(f);

    // Extract the half-precision exponent, mantissa, and original sign.
    const uint16_t exp_h = (uint16_t) (result_w >> 13) & exph_mask;
    const uint16_t mant_h = (uint16_t) result_w & manth_mask;
    const uint16_t sign_h = (uint16_t) (sign_w >> 16);

    // Assemble the result; NaN inputs get the canonical half NaN, then the
    // sign bit is OR-ed back in.
    uint16_t h = exp_h + mant_h;
    if XNN_UNPREDICTABLE(abs_w > expw_max) {
      h = nanh;
    }
    h |= sign_h;
    *o++ = h;

    n -= sizeof(uint16_t);
  }
}
| 1,687
| 24.575758
| 72
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.