#ifndef _ARM_UTILS_HELIUM_H_
#define _ARM_UTILS_HELIUM_H_

#ifdef __cplusplus
extern "C"
{
#endif

/***************************************

Definitions available for MVEF and MVEI

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#define INACTIVELANE            0 /* content of predicated-off lanes */

#endif

/***************************************

Definitions available for MVEF only

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE)

/* horizontal sum of the 4 lanes of a float32x4_t */
__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
{
    float32_t acc;

    acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
          vgetq_lane(in, 2) + vgetq_lane(in, 3);

    return acc;
}
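
/*
 * Usage sketch (illustrative, not part of the original header): reducing a
 * vector accumulator at the end of a vectorized dot product. The function
 * name and parameters are hypothetical, and blockSize is assumed to be a
 * multiple of 4 (no tail handling).
 */
__STATIC_FORCEINLINE float32_t example_dot_f32_mve(
    const float32_t * pA,
    const float32_t * pB,
    uint32_t blockSize)
{
    float32x4_t vecAcc = vdupq_n_f32(0.0f);

    for (uint32_t i = 0; i < blockSize; i += 4)
    {
        /* accumulate 4 products per iteration */
        vecAcc = vfmaq(vecAcc, vld1q(&pA[i]), vld1q(&pB[i]));
    }
    /* horizontal add of the 4 partial sums */
    return vecAddAcrossF32Mve(vecAcc);
}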

/* inverse square root initial guesses */
#define INVSQRT_MAGIC_F32           0x5f3759df
#define INV_NEWTON_INIT_F32         0x7EF127EA

/* single Newton-Raphson iteration refining an inverse square root
   estimate: invSqrt = xStart * (1.5 - xHalf * xStart * xStart) */
#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)     \
{                                                          \
    float32x4_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f32(1.5f), tmp);                   \
    /* x = x * (1.5f - xhalf * x * x) */                   \
    invSqrt = vmulq(tmp, xStart);                          \
}
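
/*
 * Usage sketch (illustrative, not part of the original header): a fast
 * vectorized reciprocal square root combining the magic-constant initial
 * guess with one Newton step from the macro above. The function name is
 * hypothetical; inputs are assumed positive, and accuracy after a single
 * iteration is limited.
 */
__STATIC_FORCEINLINE float32x4_t example_vinvsqrt_f32_mve(float32x4_t vecIn)
{
    float32x4_t vecHalf = vmulq_n_f32(vecIn, 0.5f);
    /* initial estimate via bit-level manipulation of the float encoding */
    int32x4_t vecEstI = vsubq(vdupq_n_s32(INVSQRT_MAGIC_F32),
                              vshrq(vreinterpretq_s32_f32(vecIn), 1));
    float32x4_t vecEst = vreinterpretq_f32_s32(vecEstI);

    /* one refinement step: x = x * (1.5 - 0.5*in*x*x) */
    INVSQRT_NEWTON_MVE_F32(vecEst, vecHalf, vecEst);
    return vecEst;
}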
#endif

/***************************************

Definitions available for f16 MVE only

***************************************/
#if defined(ARM_FLOAT16_SUPPORTED)
#if defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)

/* horizontal sum of the 8 lanes of a float16x8_t */
__STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in)
{
    float16x8_t tmpVec;
    _Float16 acc;

    /* swap and add adjacent f16 elements, then adjacent pairs */
    tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in);
    in = vaddq_f16(tmpVec, in);
    tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in);
    in = vaddq_f16(tmpVec, in);
    /* lanes 0 and 4 now hold the two remaining partial sums */
    acc = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4);

    return acc;
}

__STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16(
    float16x8_t vecIn)
{
    float16x8_t vecTmp, vecOut;
    uint32_t tmp;

    vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);
    /*
     * compute pair sums:
     *  re0+re1 | im0+im1 | re0+re1 | im0+im1
     *  re2+re3 | im2+im3 | re2+re3 | im2+im3
     */
    vecTmp = vaddq_f16(vecTmp, vecIn);
    vecOut = vecTmp;
    /*
     * shift left by one 32-bit element, inserting don't-care in the bottom
     */
    vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut), &tmp, 32));
    /*
     * accumulate: f16 lanes 4 and 5 now hold
     * re0+re1+re2+re3 | im0+im1+im2+im3
     */
    vecOut = vaddq_f16(vecOut, vecTmp);
    /*
     * the complex sum sits in f16 lanes 4 and 5; return the full vector
     */
    return vecOut;
}

/* extract the real/imaginary parts of the intra-vector complex sum */
#define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im)                 \
{                                                                \
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vec);     \
    Re = vgetq_lane(vecOut, 4);                                  \
    Im = vgetq_lane(vecOut, 5);                                  \
}

__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16(
    float16x8_t vecIn,
    float16_t *pOut)
{
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn);
    /*
     * store the {re, im} pair (f16 lanes 4 and 5) with a single 32-bit move
     */
    *(float32_t *) pOut = ((float32x4_t) vecOut)[2];
}
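
/*
 * Usage sketch (illustrative, not part of the original header): summing an
 * interleaved f16 complex buffer, four complex elements per vector. Names
 * are hypothetical; numCmplxElems is assumed to be a multiple of 4.
 */
__STATIC_FORCEINLINE void example_cmplx_sum_f16_mve(
    const float16_t * pSrc,
    uint32_t numCmplxElems,
    float16_t * pResult)
{
    float16x8_t vecAcc = vdupq_n_f16((float16_t)0.0);

    for (uint32_t i = 0; i < numCmplxElems * 2; i += 8)
    {
        /* 8 f16 lanes = 4 complex (re, im) pairs */
        vecAcc = vaddq_f16(vecAcc, vld1q(&pSrc[i]));
    }
    /* fold the per-lane partial sums and store {re, im} to pResult */
    mve_cmplx_sum_intra_vec_f16(vecAcc, pResult);
}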

/* inverse square root initial guess (f16 analogue of the f32 magic constant) */
#define INVSQRT_MAGIC_F16           0x59ba

/* single Newton-Raphson iteration refining an inverse square root
   estimate: invSqrt = xStart * (1.5 - xHalf * xStart * xStart) */
#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart)     \
{                                                          \
    float16x8_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp);         \
    /* x = x * (1.5f - xhalf * x * x) */                   \
    invSqrt = vmulq(tmp, xStart);                          \
}
#endif
#endif

/***************************************

Matrix transposition helpers, available for MVEF and MVEI

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     * 2x2 matrix transposition via a single scatter store:
     *
     * | 0   1 |   =>  | 0   2 |
     * | 2   3 |       | 1   3 |
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}
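
/*
 * Usage sketch (illustrative, not part of the original header): the 32-bit
 * transpose helpers move raw words, so any 32-bit element type (q31_t,
 * float32_t) can be transposed through a cast. Names are hypothetical.
 */
__STATIC_INLINE void example_trans_f32_2x2(float32_t * pSrc, float32_t * pDst)
{
    /* reinterpret the float data as 32-bit words; lane contents are preserved */
    (void) arm_mat_trans_32bit_2x2_mve((uint32_t *) pSrc, (uint32_t *) pDst);
}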

__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1 };
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5 };
    /*
     * 3x3 matrix transposition: 8 elements via two scatter stores,
     * the last element copied directly
     *
     * | 0   1   2 |       | 0   3   6 |
     * | 3   4   5 |  =>   | 1   4   7 |
     * | 6   7   8 |       | 2   5   8 |
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 matrix transposition using an interleaving load: vld4q
     * de-interleaves the 16 source words into 4 vectors holding
     * columns 0..3, which are then stored contiguously
     *
     * | 0   1   2   3  |       | 0   4   8   12 |
     * | 4   5   6   7  |  =>   | 1   5   9   13 |
     * | 8   9   10  11 |       | 2   6   10  14 |
     * | 12  13  14  15 |       | 3   7   11  15 |
     */
    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t srcRows,
    uint16_t srcCols,
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    uint32x4_t vecOffs;
    uint32_t i;
    uint32_t blkCnt;
    uint32_t const *pDataC;
    uint32_t *pDataDestR;
    uint32x4_t vecIn;

    /* gather offsets walking one source column: { 0, srcCols, 2*srcCols, 3*srcCols } */
    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        /* gather 4 column elements per iteration, store them as a row */
        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * decrement the block loop counter
             */
            blkCnt--;
        }
        /*
         * tail: predicate the final partial gather/store
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        /* next source column / destination row */
        pDataSrc += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
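
/*
 * Usage sketch (illustrative, not part of the original header): transposing
 * a rectangular 3x5 matrix of 32-bit elements with the generic helper. The
 * gather offsets walk one source column while the destination is written
 * contiguously, producing the 5x3 transpose. Buffer names are hypothetical.
 */
__STATIC_INLINE void example_trans_32bit_3x5(uint32_t * pSrc, uint32_t * pDst)
{
    /* srcRows = 3, srcCols = 5; pDst must hold 15 elements */
    (void) arm_mat_trans_32bit_generic_mve(3, 5, pSrc, pDst);
}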

__STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit(
    uint16_t srcRows,
    uint16_t srcCols,
    uint32_t *pDataSrc,
    uint16_t dstRows,
    uint16_t dstCols,
    uint32_t *pDataDest)
{
    uint32_t i;
    uint32_t const *pDataC;
    uint32_t *pDataRow;
    uint32_t *pDataDestR, *pDataDestRow;
    uint32x4_t vecOffsRef, vecOffsCur;
    uint32_t blkCnt;
    uint32x4_t vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /* gather offsets for two consecutive complex rows of one column:
       { 0, 1, 2*srcCols, 2*srcCols + 1 } */
    vecOffsRef[0] = 0;
    vecOffsRef[1] = 1;
    vecOffsRef[2] = srcCols << 1;
    vecOffsRef[3] = (srcCols << 1) + 1;

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            /* advance the gather offsets by two complex rows */
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 2));
            /*
             * decrement the block loop counter
             */
            blkCnt--;
        }
        /*
         * tail: predicate the final partial gather/store
         */
        blkCnt = (srcRows * CMPLX_DIM) & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    pDataDest[0] = pDataSrc[0];
    pDataDest[3] = pDataSrc[3];
    pDataDest[2] = pDataSrc[1];
    pDataDest[1] = pDataSrc[2];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 };
    uint16x8_t vecOffs1;
    uint16x8_t vecIn1;
    /*
     * 3x3 matrix transposition: 8 elements via one scatter store,
     * the last element copied directly
     *
     * | 0   1   2 |       | 0   3   6 |
     * | 3   4   5 |  =>   | 1   4   7 |
     * | 6   7   8 |       | 2   5   8 |
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33);
    vecIn1 = vldrhq_u16((uint16_t const *) pDataSrc);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 };
    static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 };
    uint16x8_t vecOffs1, vecOffs2;
    uint16x8_t vecIn1, vecIn2;
    uint16_t const * pDataSrcVec = (uint16_t const *) pDataSrc;
    /*
     * 4x4 matrix transposition via two scatter stores:
     *
     * | 0   1   2   3  |       | 0   4   8   12 |
     * | 4   5   6   7  |  =>   | 1   5   9   13 |
     * | 8   9   10  11 |       | 2   6   10  14 |
     * | 12  13  14  15 |       | 3   7   11  15 |
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1);
    vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2);
    vecIn1 = vldrhq_u16(pDataSrcVec);
    pDataSrcVec += 8;
    vecIn2 = vldrhq_u16(pDataSrcVec);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);
    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_generic(
    uint16_t srcRows,
    uint16_t srcCols,
    uint16_t * pDataSrc,
    uint16_t * pDataDest)
{
    uint16x8_t vecOffs;
    uint32_t i;
    uint32_t blkCnt;
    uint16_t const *pDataC;
    uint16_t *pDataDestR;
    uint16x8_t vecIn;

    /* gather offsets walking one source column: { 0, srcCols, ..., 7*srcCols } */
    vecOffs = vidupq_u16((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    while (i > 0U)
    {
        pDataC = (uint16_t const *) pDataSrc;
        pDataDestR = pDataDest;

        /* gather 8 column elements per iteration, store them as a row */
        blkCnt = srcRows >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_u16(pDataDestR, vecIn);
            pDataDestR += 8;
            pDataC = pDataC + srcCols * 8;
            /*
             * decrement the block loop counter
             */
            blkCnt--;
        }
        /*
         * tail: predicate the final partial gather/store
         */
        blkCnt = srcRows & 7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_p_u16(pDataDestR, vecIn, p0);
        }

        /* next source column / destination row */
        pDataSrc += 1;
        pDataDest += srcRows;
        i--;
    }

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit(
    uint16_t srcRows,
    uint16_t srcCols,
    uint16_t *pDataSrc,
    uint16_t dstRows,
    uint16_t dstCols,
    uint16_t *pDataDest)
{
    static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    int i;
    uint16x8_t vecOffsRef, vecOffsCur;
    uint16_t const *pDataC;
    uint16_t *pDataRow;
    uint16_t *pDataDestR, *pDataDestRow;
    uint32_t blkCnt;
    uint16x8_t vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /*
     * build the reference gather offsets for a complex column:
     * { 0, 1, 2*srcCols, 2*srcCols + 1, 4*srcCols, 4*srcCols + 1, ... }
     */
    vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol);
    vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM))
                 + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1);

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint16_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq(pDataDestR, vecIn);
            pDataDestR += 8;
            /* advance the gather offsets by four complex rows */
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 3));
            /*
             * decrement the block loop counter
             */
            blkCnt--;
        }
        /*
         * tail: predicate the final partial gather/store
         */
        blkCnt = (srcRows * CMPLX_DIM) & 0x7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
#endif

/***************************************

Definitions available for MVEI only

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h"

/* saturating arithmetic shift right of a 64-bit accumulator: the value is
   shifted left by (32 - shift) with saturation and the high word is kept,
   i.e. a saturated equivalent of (acc >> shift) */
#define MVE_ASRL_SAT16(acc, shift)        ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff)
#define MVE_ASRL_SAT32(acc, shift)        ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff)
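
/*
 * Usage sketch (illustrative, not part of the original header): narrowing a
 * 64-bit multiply-accumulate result back to Q31 with saturation. The
 * function name and shift value are hypothetical.
 */
__STATIC_INLINE q31_t example_narrow_acc_q31(q63_t acc)
{
    /* saturating equivalent of (q31_t)(acc >> 31) */
    return (q31_t) MVE_ASRL_SAT32(acc, 31);
}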

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t vecTmpLL;
    q31x4_t vecTmp0, vecTmp1;
    q31_t scale;
    q63_t tmp64;
    q31x4_t vecNrm, vecDst, vecIdx, vecSignBits;

    /* count leading sign bits, rounded down to even */
    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits (normalization)
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * build the table index from the normalized top byte
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    /* one Newton-Raphson refinement of the reciprocal sqrt estimate */
    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    /* sqrt(x) = x * 1/sqrt(x): widen the even lanes */
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);

    /*
     * scale the even lanes back (undo normalization)
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    /* widen and scale the odd lanes */
    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);

    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;

    /* negative inputs have no real square root: force those lanes to 0 */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
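
/*
 * Usage sketch (illustrative, not part of the original header): vectorized
 * square root over a Q31 buffer. Names are hypothetical; blockSize is
 * assumed to be a multiple of 4, and negative inputs come back as 0.
 */
__STATIC_INLINE void example_vsqrt_q31(const q31_t * pSrc, q31_t * pDst, uint32_t blockSize)
{
    for (uint32_t i = 0; i < blockSize; i += 4)
    {
        q31x4_t vecIn = vld1q(&pSrc[i]);
        vst1q(&pDst[i], FAST_VSQRT_Q31(vecIn));
    }
}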
#endif

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecNrm, vecDst, vecIdx, vecSignBits;

    /* declared uninitialized; all lanes are written by the narrowing moves below */
    vecDst = vuninitializedq_s16();

    /* count leading sign bits, rounded down to even */
    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits (normalization)
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    /* build the table index from the normalized top byte */
    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    /* one Newton-Raphson refinement of the reciprocal sqrt estimate */
    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    /* sqrt(x) = x * 1/sqrt(x): widen the even and odd lanes */
    vecTmpLev = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate the shift amounts: vshlq shifts right for negative values
     */
    vecTmp0 = -vecTmp0;

    /*
     * scale the even lanes back (undo normalization)
     */
    vecSignL = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * scale the odd lanes back
     */
    vecSignL = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);

    /* narrow the two 32-bit halves back into a single q15 vector */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);

    /* negative inputs have no real square root: force those lanes to 0 */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
#endif

#endif

#ifdef __cplusplus
}
#endif

#endif /* _ARM_UTILS_HELIUM_H_ */