|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef OPENCV_HAL_INTRIN_HPP |
|
|
#define OPENCV_HAL_INTRIN_HPP |
|
|
|
|
|
#include <cmath> |
|
|
#include <float.h> |
|
|
#include <stdlib.h> |
|
|
#include "opencv2/core/cvdef.h" |
|
|
|
|
|
#if defined(__GNUC__) && __GNUC__ == 12 |
|
|
#pragma GCC diagnostic push |
|
|
#pragma GCC diagnostic ignored "-Wuninitialized" |
|
|
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" |
|
|
#endif |
|
|
|
|
|
// Tiny binary/unary helper macros used by the universal-intrinsics
// implementation headers when expanding per-type operation tables.
#define OPENCV_HAL_ADD(a, b) ((a) + (b))


#define OPENCV_HAL_AND(a, b) ((a) & (b))


#define OPENCV_HAL_NOP(a) (a)


#define OPENCV_HAL_1ST(a, b) (a)
|
|
|
|
|
namespace {

// Returns the number of trailing zero bits in 'value' (i.e. the index of the
// lowest set bit). Callers should pass a non-zero value: for value == 0 the
// result is branch-dependent (some branches return 32, others are undefined,
// matching the underlying compiler intrinsics).
inline unsigned int trailingZeros32(unsigned int value) {
#if defined(_MSC_VER)
#if (_MSC_VER < 1700) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
    // Old MSVC / ARM targets: _BitScanForward leaves 'index' unchanged when
    // value == 0, hence the explicit zero initialization.
    unsigned long index = 0;
    _BitScanForward(&index, value);
    return (unsigned int)index;
#elif defined(__clang__)
    // clang-cl: __builtin_ctz(0) is undefined, so guard explicitly.
    return value ? __builtin_ctz(value) : 32;
#else
    // Modern MSVC on x86/x64: tzcnt is defined to return 32 for a zero input.
    return _tzcnt_u32(value);
#endif
#elif defined(__GNUC__) || defined(__GNUG__)
    // GCC (and clang, which also defines __GNUC__): undefined for value == 0.
    return __builtin_ctz(value);
#elif defined(__ICC) || defined(__INTEL_COMPILER)
    return _bit_scan_forward(value);
#elif defined(__clang__)
    // NOTE: unreachable in practice (clang defines __GNUC__ and is handled
    // above). The previous body, 'return llvm.cttz.i32(value, true);', was
    // LLVM-IR intrinsic spelling and not valid C++ -- it could never compile
    // if this branch were ever selected. Use the builtin instead.
    return value ? __builtin_ctz(value) : 32;
#else
    // Portable fallback: De Bruijn multiplication. (value & -value) isolates
    // the lowest set bit; the multiply/shift maps each power of two to a
    // unique 5-bit index into the lookup table.
    static const int MultiplyDeBruijnBitPosition[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
    return MultiplyDeBruijnBitPosition[((uint32_t)((value & -value) * 0x077CB531U)) >> 27];
#endif
}
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace cv { |
|
|
|
|
|
namespace hal { |
|
|
|
|
|
// Hint passed to vector store operations describing destination alignment
// and cache behavior.
enum StoreMode


{


STORE_UNALIGNED = 0, // destination pointer may be unaligned


STORE_ALIGNED = 1, // destination pointer is aligned to the vector width


STORE_ALIGNED_NOCACHE = 2 // aligned store that may bypass the cache (non-temporal)


};
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
// Primary template mapping a scalar lane type to its companion types.
// Intentionally empty: only the explicit specializations generated by
// CV_INTRIN_DEF_TYPE_TRAITS / CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE are usable.
template<typename _Tp> struct V_TypeTraits


{


};
|
|
|
|
|
// Generates an explicit V_TypeTraits specialization for 'type', wiring up:
//   int_type / uint_type - same-size signed / unsigned integer types
//   abs_type             - type holding the absolute value
//   w_type               - type with doubled lane width
//   q_type               - type with quadrupled lane width
//   sum_type             - accumulator type for horizontal sums
// reinterpret_int / reinterpret_from_int perform bit-exact reinterpretation
// between 'type' and its same-size integer type via a union (no value
// conversion).
#define CV_INTRIN_DEF_TYPE_TRAITS(type, int_type_, uint_type_, abs_type_, w_type_, q_type_, sum_type_) \


template<> struct V_TypeTraits<type> \


{ \


typedef type value_type; \


typedef int_type_ int_type; \


typedef abs_type_ abs_type; \


typedef uint_type_ uint_type; \


typedef w_type_ w_type; \


typedef q_type_ q_type; \


typedef sum_type_ sum_type; \


\


static inline int_type reinterpret_int(type x) \


{ \


union { type l; int_type i; } v; \


v.l = x; \


return v.i; \


} \


\


static inline type reinterpret_from_int(int_type x) \


{ \


union { type l; int_type i; } v; \


v.i = x; \


return v.l; \


} \


}
|
|
|
|
|
// Same as CV_INTRIN_DEF_TYPE_TRAITS, but for types that have no quad-width
// companion type (32/64-bit lanes and floating-point types): the generated
// specialization omits the q_type typedef.
#define CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(type, int_type_, uint_type_, abs_type_, w_type_, sum_type_) \


template<> struct V_TypeTraits<type> \


{ \


typedef type value_type; \


typedef int_type_ int_type; \


typedef abs_type_ abs_type; \


typedef uint_type_ uint_type; \


typedef w_type_ w_type; \


typedef sum_type_ sum_type; \


\


static inline int_type reinterpret_int(type x) \


{ \


union { type l; int_type i; } v; \


v.l = x; \


return v.i; \


} \


\


static inline type reinterpret_from_int(int_type x) \


{ \


union { type l; int_type i; } v; \


v.i = x; \


return v.l; \


} \


}
|
|
|
|
|
// V_TypeTraits specializations for every supported lane type. Argument order:
// (type, int_type, uint_type, abs_type, w_type [, q_type], sum_type).
// 64-bit and floating-point types use the NO_Q_TYPE form: there is no
// quad-width companion for them.
CV_INTRIN_DEF_TYPE_TRAITS(uchar, schar, uchar, uchar, ushort, unsigned, unsigned);


CV_INTRIN_DEF_TYPE_TRAITS(schar, schar, uchar, uchar, short, int, int);


CV_INTRIN_DEF_TYPE_TRAITS(ushort, short, ushort, ushort, unsigned, uint64, unsigned);


CV_INTRIN_DEF_TYPE_TRAITS(short, short, ushort, ushort, int, int64, int);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(unsigned, int, unsigned, unsigned, uint64, unsigned);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(int, int, unsigned, unsigned, int64, int);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(float, int, unsigned, float, double, float);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(uint64, int64, uint64, uint64, void, uint64);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(int64, int64, uint64, uint64, void, int64);


CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(double, int64, uint64, double, void, double);
|
|
|
|
|
#ifndef CV_DOXYGEN |
|
|
|
|
|
#ifndef CV_CPU_OPTIMIZATION_HAL_NAMESPACE |
|
|
#ifdef CV_FORCE_SIMD128_CPP |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_EMULATOR_CPP |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_EMULATOR_CPP { |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } |
|
|
#elif defined(CV_CPU_DISPATCH_MODE) |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) { |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } |
|
|
#else |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_baseline |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_baseline { |
|
|
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } |
|
|
#endif |
|
|
#endif |
|
|
|
|
|
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN |
|
|
|
|
|
// Forward declarations of the generic per-vector-type initializers; each
// backend header provides the actual definitions (one overload set per
// scalar lane type).
template <typename _VecTp> inline _VecTp v_setzero_();


template <typename _VecTp> inline _VecTp v_setall_(uchar);


template <typename _VecTp> inline _VecTp v_setall_(schar);


template <typename _VecTp> inline _VecTp v_setall_(ushort);


template <typename _VecTp> inline _VecTp v_setall_(short);


template <typename _VecTp> inline _VecTp v_setall_(unsigned);


template <typename _VecTp> inline _VecTp v_setall_(int);


template <typename _VecTp> inline _VecTp v_setall_(uint64);


template <typename _VecTp> inline _VecTp v_setall_(int64);


template <typename _VecTp> inline _VecTp v_setall_(float);


template <typename _VecTp> inline _VecTp v_setall_(double);
|
|
|
|
|
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END |
|
|
using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE; |
|
|
#endif |
|
|
} |
|
|
|
|
|
#ifdef CV_DOXYGEN |
|
|
# undef CV_AVX2 |
|
|
# undef CV_SSE2 |
|
|
# undef CV_NEON |
|
|
# undef CV_VSX |
|
|
# undef CV_FP16 |
|
|
# undef CV_MSA |
|
|
# undef CV_RVV |
|
|
#endif |
|
|
|
|
|
#if (CV_SSE2 || CV_NEON || CV_VSX || CV_MSA || CV_WASM_SIMD || CV_RVV071 || CV_LSX) && !defined(CV_FORCE_SIMD128_CPP) |
|
|
#define CV__SIMD_FORWARD 128 |
|
|
#include "opencv2/core/hal/intrin_forward.hpp" |
|
|
#endif |
|
|
|
|
|
#if CV_SSE2 && !defined(CV_FORCE_SIMD128_CPP) |
|
|
|
|
|
#include "opencv2/core/hal/intrin_sse_em.hpp" |
|
|
#include "opencv2/core/hal/intrin_sse.hpp" |
|
|
|
|
|
#elif CV_NEON && !defined(CV_FORCE_SIMD128_CPP) |
|
|
|
|
|
#include "opencv2/core/hal/intrin_neon.hpp" |
|
|
|
|
|
#elif CV_RVV071 && !defined(CV_FORCE_SIMD128_CPP) |
|
|
#define CV_SIMD128_CPP 0 |
|
|
#include "opencv2/core/hal/intrin_rvv071.hpp" |
|
|
|
|
|
#elif CV_VSX && !defined(CV_FORCE_SIMD128_CPP) |
|
|
|
|
|
#include "opencv2/core/hal/intrin_vsx.hpp" |
|
|
|
|
|
#elif CV_MSA && !defined(CV_FORCE_SIMD128_CPP) |
|
|
|
|
|
#include "opencv2/core/hal/intrin_msa.hpp" |
|
|
|
|
|
#elif CV_WASM_SIMD && !defined(CV_FORCE_SIMD128_CPP) |
|
|
#include "opencv2/core/hal/intrin_wasm.hpp" |
|
|
|
|
|
#elif CV_RVV && !defined(CV_FORCE_SIMD128_CPP) |
|
|
#include "opencv2/core/hal/intrin_rvv_scalable.hpp" |
|
|
|
|
|
#elif CV_LSX && !defined(CV_FORCE_SIMD128_CPP) |
|
|
|
|
|
#include "opencv2/core/hal/intrin_lsx.hpp" |
|
|
|
|
|
#else |
|
|
|
|
|
#include "opencv2/core/hal/intrin_cpp.hpp" |
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#if CV_AVX2 |
|
|
|
|
|
#define CV__SIMD_FORWARD 256 |
|
|
#include "opencv2/core/hal/intrin_forward.hpp" |
|
|
#include "opencv2/core/hal/intrin_avx.hpp" |
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#if CV_AVX512_SKX |
|
|
|
|
|
#define CV__SIMD_FORWARD 512 |
|
|
#include "opencv2/core/hal/intrin_forward.hpp" |
|
|
#include "opencv2/core/hal/intrin_avx512.hpp" |
|
|
|
|
|
#endif |
|
|
|
|
|
#if CV_LASX |
|
|
|
|
|
#define CV__SIMD_FORWARD 256 |
|
|
#include "opencv2/core/hal/intrin_forward.hpp" |
|
|
#include "opencv2/core/hal/intrin_lasx.hpp" |
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
namespace cv { |
|
|
|
|
|
#ifndef CV_DOXYGEN |
|
|
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD128 |
|
|
#define CV_SIMD128 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD128_CPP |
|
|
#define CV_SIMD128_CPP 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD128_64F |
|
|
#define CV_SIMD128_64F 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD256 |
|
|
#define CV_SIMD256 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD256_64F |
|
|
#define CV_SIMD256_64F 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD512 |
|
|
#define CV_SIMD512 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD512_64F |
|
|
#define CV_SIMD512_64F 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD128_FP16 |
|
|
#define CV_SIMD128_FP16 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD256_FP16 |
|
|
#define CV_SIMD256_FP16 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD512_FP16 |
|
|
#define CV_SIMD512_FP16 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD_SCALABLE |
|
|
#define CV_SIMD_SCALABLE 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD_SCALABLE_64F |
|
|
#define CV_SIMD_SCALABLE_64F 0 |
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
// Primary template mapping a vector register type to its companion register
// types. Intentionally empty: only the explicit specializations generated by
// CV_DEF_REG_TRAITS below are usable.
template<typename _Tp> struct V_RegTraits


{


};





// Generates a V_RegTraits specialization for register type '_reg':
//   u_reg     - same-width unsigned-lane register
//   w_reg     - register with doubled lane width
//   q_reg     - register with quadrupled lane width ('void' if none)
//   int_reg   - same-width signed-integer-lane register
//   round_reg - register type produced by rounding conversions ('void' if n/a)
// 'prefix', 'lane_type' and 'suffix' are part of the generation interface
// used by the backend headers.
#define CV_DEF_REG_TRAITS(prefix, _reg, lane_type, suffix, _u_reg, _w_reg, _q_reg, _int_reg, _round_reg) \


template<> struct V_RegTraits<_reg> \


{ \


typedef _reg reg; \


typedef _u_reg u_reg; \


typedef _w_reg w_reg; \


typedef _q_reg q_reg; \


typedef _int_reg int_reg; \


typedef _round_reg round_reg; \


}
|
|
|
|
|
#if CV_SIMD128 || CV_SIMD128_CPP |
|
|
CV_DEF_REG_TRAITS(v, v_uint8x16, uchar, u8, v_uint8x16, v_uint16x8, v_uint32x4, v_int8x16, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int8x16, schar, s8, v_uint8x16, v_int16x8, v_int32x4, v_int8x16, void); |
|
|
CV_DEF_REG_TRAITS(v, v_uint16x8, ushort, u16, v_uint16x8, v_uint32x4, v_uint64x2, v_int16x8, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int16x8, short, s16, v_uint16x8, v_int32x4, v_int64x2, v_int16x8, void); |
|
|
CV_DEF_REG_TRAITS(v, v_uint32x4, unsigned, u32, v_uint32x4, v_uint64x2, void, v_int32x4, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int32x4, int, s32, v_uint32x4, v_int64x2, void, v_int32x4, void); |
|
|
#if CV_SIMD128_64F || CV_SIMD128_CPP |
|
|
CV_DEF_REG_TRAITS(v, v_float32x4, float, f32, v_float32x4, v_float64x2, void, v_int32x4, v_int32x4); |
|
|
#else |
|
|
CV_DEF_REG_TRAITS(v, v_float32x4, float, f32, v_float32x4, void, void, v_int32x4, v_int32x4); |
|
|
#endif |
|
|
CV_DEF_REG_TRAITS(v, v_uint64x2, uint64, u64, v_uint64x2, void, void, v_int64x2, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int64x2, int64, s64, v_uint64x2, void, void, v_int64x2, void); |
|
|
#if CV_SIMD128_64F |
|
|
CV_DEF_REG_TRAITS(v, v_float64x2, double, f64, v_float64x2, void, void, v_int64x2, v_int32x4); |
|
|
#endif |
|
|
#endif |
|
|
|
|
|
#if CV_SIMD256 |
|
|
CV_DEF_REG_TRAITS(v256, v_uint8x32, uchar, u8, v_uint8x32, v_uint16x16, v_uint32x8, v_int8x32, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_int8x32, schar, s8, v_uint8x32, v_int16x16, v_int32x8, v_int8x32, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_uint16x16, ushort, u16, v_uint16x16, v_uint32x8, v_uint64x4, v_int16x16, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_int16x16, short, s16, v_uint16x16, v_int32x8, v_int64x4, v_int16x16, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_uint32x8, unsigned, u32, v_uint32x8, v_uint64x4, void, v_int32x8, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_int32x8, int, s32, v_uint32x8, v_int64x4, void, v_int32x8, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_float32x8, float, f32, v_float32x8, v_float64x4, void, v_int32x8, v_int32x8); |
|
|
CV_DEF_REG_TRAITS(v256, v_uint64x4, uint64, u64, v_uint64x4, void, void, v_int64x4, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_int64x4, int64, s64, v_uint64x4, void, void, v_int64x4, void); |
|
|
CV_DEF_REG_TRAITS(v256, v_float64x4, double, f64, v_float64x4, void, void, v_int64x4, v_int32x8); |
|
|
#endif |
|
|
|
|
|
#if CV_SIMD512 |
|
|
CV_DEF_REG_TRAITS(v512, v_uint8x64, uchar, u8, v_uint8x64, v_uint16x32, v_uint32x16, v_int8x64, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_int8x64, schar, s8, v_uint8x64, v_int16x32, v_int32x16, v_int8x64, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_uint16x32, ushort, u16, v_uint16x32, v_uint32x16, v_uint64x8, v_int16x32, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_int16x32, short, s16, v_uint16x32, v_int32x16, v_int64x8, v_int16x32, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_uint32x16, unsigned, u32, v_uint32x16, v_uint64x8, void, v_int32x16, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_int32x16, int, s32, v_uint32x16, v_int64x8, void, v_int32x16, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_float32x16, float, f32, v_float32x16, v_float64x8, void, v_int32x16, v_int32x16); |
|
|
CV_DEF_REG_TRAITS(v512, v_uint64x8, uint64, u64, v_uint64x8, void, void, v_int64x8, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_int64x8, int64, s64, v_uint64x8, void, void, v_int64x8, void); |
|
|
CV_DEF_REG_TRAITS(v512, v_float64x8, double, f64, v_float64x8, void, void, v_int64x8, v_int32x16); |
|
|
#endif |
|
|
#if CV_SIMD_SCALABLE |
|
|
CV_DEF_REG_TRAITS(v, v_uint8, uchar, u8, v_uint8, v_uint16, v_uint32, v_int8, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int8, schar, s8, v_uint8, v_int16, v_int32, v_int8, void); |
|
|
CV_DEF_REG_TRAITS(v, v_uint16, ushort, u16, v_uint16, v_uint32, v_uint64, v_int16, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int16, short, s16, v_uint16, v_int32, v_int64, v_int16, void); |
|
|
CV_DEF_REG_TRAITS(v, v_uint32, unsigned, u32, v_uint32, v_uint64, void, v_int32, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int32, int, s32, v_uint32, v_int64, void, v_int32, void); |
|
|
CV_DEF_REG_TRAITS(v, v_float32, float, f32, v_float32, v_float64, void, v_int32, v_int32); |
|
|
CV_DEF_REG_TRAITS(v, v_uint64, uint64, u64, v_uint64, void, void, v_int64, void); |
|
|
CV_DEF_REG_TRAITS(v, v_int64, int64, s64, v_uint64, void, void, v_int64, void); |
|
|
CV_DEF_REG_TRAITS(v, v_float64, double, f64, v_float64, void, void, v_int64, v_int32); |
|
|
#endif |
|
|
|
|
|
|
|
|
#if CV_SIMD512 && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 512) |
|
|
#define CV__SIMD_NAMESPACE simd512 |
|
|
namespace CV__SIMD_NAMESPACE { |
|
|
#define CV_SIMD 1 |
|
|
#define CV_SIMD_64F CV_SIMD512_64F |
|
|
#define CV_SIMD_FP16 CV_SIMD512_FP16 |
|
|
#define CV_SIMD_WIDTH 64 |
|
|
|
|
|
|
|
|
|
|
|
typedef v_uint8x64 v_uint8; |
|
|
|
|
|
typedef v_int8x64 v_int8; |
|
|
|
|
|
typedef v_uint16x32 v_uint16; |
|
|
|
|
|
typedef v_int16x32 v_int16; |
|
|
|
|
|
typedef v_uint32x16 v_uint32; |
|
|
|
|
|
typedef v_int32x16 v_int32; |
|
|
|
|
|
typedef v_uint64x8 v_uint64; |
|
|
|
|
|
typedef v_int64x8 v_int64; |
|
|
|
|
|
typedef v_float32x16 v_float32; |
|
|
#if CV_SIMD512_64F |
|
|
|
|
|
typedef v_float64x8 v_float64; |
|
|
#endif |
|
|
|
|
|
|
|
|
#define VXPREFIX(func) v512##func |
|
|
} |
|
|
using namespace CV__SIMD_NAMESPACE; |
|
|
#elif CV_SIMD256 && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 256) |
|
|
#define CV__SIMD_NAMESPACE simd256 |
|
|
namespace CV__SIMD_NAMESPACE { |
|
|
#define CV_SIMD 1 |
|
|
#define CV_SIMD_64F CV_SIMD256_64F |
|
|
#define CV_SIMD_FP16 CV_SIMD256_FP16 |
|
|
#define CV_SIMD_WIDTH 32 |
|
|
|
|
|
|
|
|
|
|
|
typedef v_uint8x32 v_uint8; |
|
|
|
|
|
typedef v_int8x32 v_int8; |
|
|
|
|
|
typedef v_uint16x16 v_uint16; |
|
|
|
|
|
typedef v_int16x16 v_int16; |
|
|
|
|
|
typedef v_uint32x8 v_uint32; |
|
|
|
|
|
typedef v_int32x8 v_int32; |
|
|
|
|
|
typedef v_uint64x4 v_uint64; |
|
|
|
|
|
typedef v_int64x4 v_int64; |
|
|
|
|
|
typedef v_float32x8 v_float32; |
|
|
#if CV_SIMD256_64F |
|
|
|
|
|
typedef v_float64x4 v_float64; |
|
|
#endif |
|
|
|
|
|
|
|
|
#define VXPREFIX(func) v256##func |
|
|
} |
|
|
using namespace CV__SIMD_NAMESPACE; |
|
|
#elif (CV_SIMD128 || CV_SIMD128_CPP) && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 128) |
|
|
#if defined CV_SIMD128_CPP |
|
|
#define CV__SIMD_NAMESPACE simd128_cpp |
|
|
#else |
|
|
#define CV__SIMD_NAMESPACE simd128 |
|
|
#endif |
|
|
namespace CV__SIMD_NAMESPACE { |
|
|
#define CV_SIMD CV_SIMD128 |
|
|
#define CV_SIMD_64F CV_SIMD128_64F |
|
|
#define CV_SIMD_WIDTH 16 |
|
|
|
|
|
|
|
|
|
|
|
typedef v_uint8x16 v_uint8; |
|
|
|
|
|
typedef v_int8x16 v_int8; |
|
|
|
|
|
typedef v_uint16x8 v_uint16; |
|
|
|
|
|
typedef v_int16x8 v_int16; |
|
|
|
|
|
typedef v_uint32x4 v_uint32; |
|
|
|
|
|
typedef v_int32x4 v_int32; |
|
|
|
|
|
typedef v_uint64x2 v_uint64; |
|
|
|
|
|
typedef v_int64x2 v_int64; |
|
|
|
|
|
typedef v_float32x4 v_float32; |
|
|
#if CV_SIMD128_64F |
|
|
|
|
|
typedef v_float64x2 v_float64; |
|
|
#endif |
|
|
|
|
|
|
|
|
#define VXPREFIX(func) v##func |
|
|
} |
|
|
using namespace CV__SIMD_NAMESPACE; |
|
|
|
|
|
#elif CV_SIMD_SCALABLE |
|
|
#define CV__SIMD_NAMESPACE simd |
|
|
namespace CV__SIMD_NAMESPACE { |
|
|
#define CV_SIMD 0 |
|
|
#define CV_SIMD_WIDTH 128 |
|
|
|
|
|
#define VXPREFIX(func) v##func |
|
|
} |
|
|
using namespace CV__SIMD_NAMESPACE; |
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
#ifndef CV_SIMD_64F |
|
|
#define CV_SIMD_64F 0 |
|
|
#endif |
|
|
|
|
|
namespace CV__SIMD_NAMESPACE { |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_setall_*: broadcast a scalar into every lane of the widest enabled
// vector type. VXPREFIX dispatches to the selected backend (v/v256/v512).
inline v_uint8 vx_setall_u8(uchar v) { return VXPREFIX(_setall_u8)(v); }


inline v_int8 vx_setall_s8(schar v) { return VXPREFIX(_setall_s8)(v); }


inline v_uint16 vx_setall_u16(ushort v) { return VXPREFIX(_setall_u16)(v); }


inline v_int16 vx_setall_s16(short v) { return VXPREFIX(_setall_s16)(v); }


inline v_int32 vx_setall_s32(int v) { return VXPREFIX(_setall_s32)(v); }


inline v_uint32 vx_setall_u32(unsigned v) { return VXPREFIX(_setall_u32)(v); }


inline v_float32 vx_setall_f32(float v) { return VXPREFIX(_setall_f32)(v); }


inline v_int64 vx_setall_s64(int64 v) { return VXPREFIX(_setall_s64)(v); }


inline v_uint64 vx_setall_u64(uint64 v) { return VXPREFIX(_setall_u64)(v); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_setall_f64(double v) { return VXPREFIX(_setall_f64)(v); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_setzero_*: produce an all-zero vector of the widest enabled vector type.
inline v_uint8 vx_setzero_u8() { return VXPREFIX(_setzero_u8)(); }


inline v_int8 vx_setzero_s8() { return VXPREFIX(_setzero_s8)(); }


inline v_uint16 vx_setzero_u16() { return VXPREFIX(_setzero_u16)(); }


inline v_int16 vx_setzero_s16() { return VXPREFIX(_setzero_s16)(); }


inline v_int32 vx_setzero_s32() { return VXPREFIX(_setzero_s32)(); }


inline v_uint32 vx_setzero_u32() { return VXPREFIX(_setzero_u32)(); }


inline v_float32 vx_setzero_f32() { return VXPREFIX(_setzero_f32)(); }


inline v_int64 vx_setzero_s64() { return VXPREFIX(_setzero_s64)(); }


inline v_uint64 vx_setzero_u64() { return VXPREFIX(_setzero_u64)(); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_setzero_f64() { return VXPREFIX(_setzero_f64)(); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load: load a full vector from memory (no alignment requirement),
// overloaded on the pointed-to lane type.
inline v_uint8 vx_load(const uchar * ptr) { return VXPREFIX(_load)(ptr); }


inline v_int8 vx_load(const schar * ptr) { return VXPREFIX(_load)(ptr); }


inline v_uint16 vx_load(const ushort * ptr) { return VXPREFIX(_load)(ptr); }


inline v_int16 vx_load(const short * ptr) { return VXPREFIX(_load)(ptr); }


inline v_int32 vx_load(const int * ptr) { return VXPREFIX(_load)(ptr); }


inline v_uint32 vx_load(const unsigned * ptr) { return VXPREFIX(_load)(ptr); }


inline v_float32 vx_load(const float * ptr) { return VXPREFIX(_load)(ptr); }


inline v_int64 vx_load(const int64 * ptr) { return VXPREFIX(_load)(ptr); }


inline v_uint64 vx_load(const uint64 * ptr) { return VXPREFIX(_load)(ptr); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_load(const double * ptr) { return VXPREFIX(_load)(ptr); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load_aligned: load a full vector from memory that the caller guarantees
// is aligned to the vector width.
inline v_uint8 vx_load_aligned(const uchar * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_int8 vx_load_aligned(const schar * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_uint16 vx_load_aligned(const ushort * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_int16 vx_load_aligned(const short * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_int32 vx_load_aligned(const int * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_uint32 vx_load_aligned(const unsigned * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_float32 vx_load_aligned(const float * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_int64 vx_load_aligned(const int64 * ptr) { return VXPREFIX(_load_aligned)(ptr); }


inline v_uint64 vx_load_aligned(const uint64 * ptr) { return VXPREFIX(_load_aligned)(ptr); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_load_aligned(const double * ptr) { return VXPREFIX(_load_aligned)(ptr); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load_low: load only the lower half of a vector from memory.
inline v_uint8 vx_load_low(const uchar * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_int8 vx_load_low(const schar * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_uint16 vx_load_low(const ushort * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_int16 vx_load_low(const short * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_int32 vx_load_low(const int * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_uint32 vx_load_low(const unsigned * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_float32 vx_load_low(const float * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_int64 vx_load_low(const int64 * ptr) { return VXPREFIX(_load_low)(ptr); }


inline v_uint64 vx_load_low(const uint64 * ptr) { return VXPREFIX(_load_low)(ptr); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_load_low(const double * ptr) { return VXPREFIX(_load_low)(ptr); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load_halves: assemble a vector from two separate memory locations
// (ptr0 fills the lower half, ptr1 the upper half).
inline v_uint8 vx_load_halves(const uchar * ptr0, const uchar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_int8 vx_load_halves(const schar * ptr0, const schar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_uint16 vx_load_halves(const ushort * ptr0, const ushort * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_int16 vx_load_halves(const short * ptr0, const short * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_int32 vx_load_halves(const int * ptr0, const int * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_uint32 vx_load_halves(const unsigned * ptr0, const unsigned * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_float32 vx_load_halves(const float * ptr0, const float * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_int64 vx_load_halves(const int64 * ptr0, const int64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


inline v_uint64 vx_load_halves(const uint64 * ptr0, const uint64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_load_halves(const double * ptr0, const double * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_lut: gather one lane per index — lane i is loaded from ptr[idx[i]].
inline v_uint8 vx_lut(const uchar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_int8 vx_lut(const schar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_uint16 vx_lut(const ushort * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_int16 vx_lut(const short* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_int32 vx_lut(const int* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_uint32 vx_lut(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_float32 vx_lut(const float* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_int64 vx_lut(const int64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


inline v_uint64 vx_lut(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_lut(const double* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_lut_pairs: gather two consecutive elements per index
// (lanes 2i and 2i+1 come from ptr[idx[i]] and ptr[idx[i]+1]).
inline v_uint8 vx_lut_pairs(const uchar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_int8 vx_lut_pairs(const schar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_uint16 vx_lut_pairs(const ushort * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_int16 vx_lut_pairs(const short* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_int32 vx_lut_pairs(const int* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_uint32 vx_lut_pairs(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_float32 vx_lut_pairs(const float* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_int64 vx_lut_pairs(const int64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


inline v_uint64 vx_lut_pairs(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F


inline v_float64 vx_lut_pairs(const double* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }


#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_lut_quads: gather four consecutive elements per index. Note: only
// provided for lane types up to 32-bit (no 64-bit overloads).
inline v_uint8 vx_lut_quads(const uchar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_int8 vx_lut_quads(const schar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_uint16 vx_lut_quads(const ushort* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_int16 vx_lut_quads(const short* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_int32 vx_lut_quads(const int* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_uint32 vx_lut_quads(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }


inline v_float32 vx_lut_quads(const float* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load_expand: load values and widen each lane to the next-larger type
// (e.g. uchar -> v_uint16, hfloat -> v_float32).
inline v_uint16 vx_load_expand(const uchar * ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_int16 vx_load_expand(const schar * ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_uint32 vx_load_expand(const ushort * ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_int32 vx_load_expand(const short* ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_int64 vx_load_expand(const int* ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_uint64 vx_load_expand(const unsigned* ptr) { return VXPREFIX(_load_expand)(ptr); }


inline v_float32 vx_load_expand(const hfloat * ptr) { return VXPREFIX(_load_expand)(ptr); }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// vx_load_expand_q: load 8-bit values and widen each lane four times
// (8-bit -> 32-bit lanes).
inline v_uint32 vx_load_expand_q(const uchar * ptr) { return VXPREFIX(_load_expand_q)(ptr); }


inline v_int32 vx_load_expand_q(const schar * ptr) { return VXPREFIX(_load_expand_q)(ptr); }
|
|
|
|
|
|
|
|
|
|
|
// vx_cleanup: backend-specific end-of-SIMD-section hook (dispatches to the
// selected backend's _cleanup; a no-op on most backends).
inline void vx_cleanup() { VXPREFIX(_cleanup)(); }
|
|
|
|
|
#if !CV_SIMD_SCALABLE |
|
|
|
|
|
#if !(CV_NEON && !defined(CV_FORCE_SIMD128_CPP)) |
|
|
// Compile-time traits for fixed-width vector types: exposes the lane count
// (both as a function and as enum constants) and the scalar lane type,
// forwarded from the vector type's own nlanes/lane_type members.
template<typename T> struct VTraits {


static inline int vlanes() { return T::nlanes; } // number of lanes


enum { nlanes = T::nlanes, max_nlanes = T::nlanes }; // fixed-width: max == actual


using lane_type = typename T::lane_type;


};
|
|
|
|
|
|
|
|
// Generates a free-function v_get0(v) returning lane 0 of the given vector
// type, forwarding to the member v.get0().
#define OPENCV_HAL_WRAP_GRT0(_Tpvec) \


inline typename VTraits<_Tpvec>::lane_type v_get0(const _Tpvec& v) \


{ \


return v.get0(); \


}
|
|
|
|
|
OPENCV_HAL_WRAP_GRT0(v_uint8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint32) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int32) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint64) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int64) |
|
|
OPENCV_HAL_WRAP_GRT0(v_float32) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_GRT0(v_float64) |
|
|
#endif |
|
|
#if CV_SIMD_WIDTH != 16 && CV_SIMD128 |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint8x16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint16x8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint32x4) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint64x2) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int8x16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int16x8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int32x4) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int64x2) |
|
|
OPENCV_HAL_WRAP_GRT0(v_float32x4) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_GRT0(v_float64x2) |
|
|
#endif |
|
|
#endif |
|
|
#if CV_SIMD_WIDTH != 32 && CV_SIMD256 |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint8x32) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint16x16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint32x8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_uint64x4) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int8x32) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int16x16) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int32x8) |
|
|
OPENCV_HAL_WRAP_GRT0(v_int64x4) |
|
|
OPENCV_HAL_WRAP_GRT0(v_float32x8) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_GRT0(v_float64x4) |
|
|
#endif |
|
|
#endif |
|
|
#endif |
|
|
|
|
|
// Generates a variadic v_add overload (3+ operands) for the given vector
// type, folding left onto the binary v_add provided by the backend.
#define OPENCV_HAL_WRAP_BIN_OP_ADDSUB(_Tpvec) \


template<typename... Args> \


inline _Tpvec v_add(const _Tpvec& f1, const _Tpvec& f2, const _Tpvec& f3, const Args&... vf) { \


return v_add(v_add(f1, f2), f3, vf...); \


}
|
|
|
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint64) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int64) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float32) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float64) |
|
|
#endif |
|
|
#if CV_SIMD_WIDTH != 16 && CV_SIMD128 |
|
|
|
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint8x16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint16x8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint32x4) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint64x2) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int8x16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int16x8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int32x4) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int64x2) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float32x4) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float64x2) |
|
|
#endif |
|
|
#endif |
|
|
#if CV_SIMD_WIDTH != 32 && CV_SIMD256 |
|
|
|
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint8x32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint16x16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint32x8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_uint64x4) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int8x32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int16x16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int32x8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_int64x4) |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float32x8) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_BIN_OP_ADDSUB(v_float64x4) |
|
|
#endif |
|
|
#endif |
|
|
|
|
|
// Generates a variadic v_mul overload (3+ operands) for the given vector
// type, folding left onto the binary v_mul provided by the backend.
#define OPENCV_HAL_WRAP_BIN_OP_MUL(_Tpvec) \


template<typename... Args> \


inline _Tpvec v_mul(const _Tpvec& f1, const _Tpvec& f2, const _Tpvec& f3, const Args&... vf) { \


return v_mul(v_mul(f1, f2), f3, vf...); \


}
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_int8) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_int16) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_int32) |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_float32) |
|
|
#if CV_SIMD_64F |
|
|
OPENCV_HAL_WRAP_BIN_OP_MUL(v_float64) |
|
|
#endif |
|
|
// Multiply wrappers for the fixed 128-bit types (64-bit integer types again
// omitted, matching the preferred-width group above).
#if CV_SIMD_WIDTH != 16 && CV_SIMD128


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint8x16)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint16x8)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint32x4)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int8x16)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int16x8)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int32x4)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_float32x4)


#if CV_SIMD_64F


OPENCV_HAL_WRAP_BIN_OP_MUL(v_float64x2)


#endif


#endif
|
|
// Multiply wrappers for the fixed 256-bit types (64-bit integer types again
// omitted, matching the preferred-width group above).
#if CV_SIMD_WIDTH != 32 && CV_SIMD256


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint8x32)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint16x16)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_uint32x8)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int8x32)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int16x16)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_int32x8)


OPENCV_HAL_WRAP_BIN_OP_MUL(v_float32x8)


#if CV_SIMD_64F


OPENCV_HAL_WRAP_BIN_OP_MUL(v_float64x4)


#endif


#endif
|
|
|
|
|
// OPENCV_HAL_WRAP_EXTRACT: define v_extract_highest(v), which returns the
// highest-indexed (last) lane of the vector as a scalar, via v_extract_n
// with the compile-time lane count from VTraits.
// The continuation lines must be contiguous: a stray blank line after a
// trailing backslash would terminate the macro definition early.
#define OPENCV_HAL_WRAP_EXTRACT(_Tpvec) \
    inline typename VTraits<_Tpvec>::lane_type v_extract_highest(const _Tpvec& v) \
    { \
        return v_extract_n<VTraits<_Tpvec>::nlanes-1>(v); \
    }
|
|
|
|
|
// Instantiate v_extract_highest for every preferred-width type (including
// the 64-bit integer types, unlike the multiply group above).
OPENCV_HAL_WRAP_EXTRACT(v_uint8)


OPENCV_HAL_WRAP_EXTRACT(v_int8)


OPENCV_HAL_WRAP_EXTRACT(v_uint16)


OPENCV_HAL_WRAP_EXTRACT(v_int16)


OPENCV_HAL_WRAP_EXTRACT(v_uint32)


OPENCV_HAL_WRAP_EXTRACT(v_int32)


OPENCV_HAL_WRAP_EXTRACT(v_uint64)


OPENCV_HAL_WRAP_EXTRACT(v_int64)


OPENCV_HAL_WRAP_EXTRACT(v_float32)


#if CV_SIMD_64F


OPENCV_HAL_WRAP_EXTRACT(v_float64)


#endif
|
|
// v_extract_highest for the fixed 128-bit types.
#if CV_SIMD_WIDTH != 16 && CV_SIMD128


OPENCV_HAL_WRAP_EXTRACT(v_uint8x16)


OPENCV_HAL_WRAP_EXTRACT(v_uint16x8)


OPENCV_HAL_WRAP_EXTRACT(v_uint32x4)


OPENCV_HAL_WRAP_EXTRACT(v_uint64x2)


OPENCV_HAL_WRAP_EXTRACT(v_int8x16)


OPENCV_HAL_WRAP_EXTRACT(v_int16x8)


OPENCV_HAL_WRAP_EXTRACT(v_int32x4)


OPENCV_HAL_WRAP_EXTRACT(v_int64x2)


OPENCV_HAL_WRAP_EXTRACT(v_float32x4)


#if CV_SIMD_64F


OPENCV_HAL_WRAP_EXTRACT(v_float64x2)


#endif


#endif
|
|
// v_extract_highest for the fixed 256-bit types.
#if CV_SIMD_WIDTH != 32 && CV_SIMD256


OPENCV_HAL_WRAP_EXTRACT(v_uint8x32)


OPENCV_HAL_WRAP_EXTRACT(v_uint16x16)


OPENCV_HAL_WRAP_EXTRACT(v_uint32x8)


OPENCV_HAL_WRAP_EXTRACT(v_uint64x4)


OPENCV_HAL_WRAP_EXTRACT(v_int8x32)


OPENCV_HAL_WRAP_EXTRACT(v_int16x16)


OPENCV_HAL_WRAP_EXTRACT(v_int32x8)


OPENCV_HAL_WRAP_EXTRACT(v_int64x4)


OPENCV_HAL_WRAP_EXTRACT(v_float32x8)


#if CV_SIMD_64F


OPENCV_HAL_WRAP_EXTRACT(v_float64x4)


#endif


#endif
|
|
|
|
|
// OPENCV_HAL_WRAP_BROADCAST: define v_broadcast_highest(v), which returns a
// vector whose every lane holds the value of v's highest-indexed (last) lane,
// via v_broadcast_element with the compile-time lane count from VTraits.
// The continuation lines must be contiguous: a stray blank line after a
// trailing backslash would terminate the macro definition early.
#define OPENCV_HAL_WRAP_BROADCAST(_Tpvec) \
    inline _Tpvec v_broadcast_highest(const _Tpvec& v) \
    { \
        return v_broadcast_element<VTraits<_Tpvec>::nlanes-1>(v); \
    }
|
|
|
|
|
// v_broadcast_highest is instantiated only for 32-bit lane types; no other
// lane widths are provided here.
OPENCV_HAL_WRAP_BROADCAST(v_uint32)


OPENCV_HAL_WRAP_BROADCAST(v_int32)


OPENCV_HAL_WRAP_BROADCAST(v_float32)


#if CV_SIMD_WIDTH != 16 && CV_SIMD128


OPENCV_HAL_WRAP_BROADCAST(v_uint32x4)


OPENCV_HAL_WRAP_BROADCAST(v_int32x4)


OPENCV_HAL_WRAP_BROADCAST(v_float32x4)


#endif


#if CV_SIMD_WIDTH != 32 && CV_SIMD256


OPENCV_HAL_WRAP_BROADCAST(v_uint32x8)


OPENCV_HAL_WRAP_BROADCAST(v_int32x8)


OPENCV_HAL_WRAP_BROADCAST(v_float32x8)


#endif
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/// Width-agnostic store: writes all lanes of @p v to @p dst by forwarding to
/// the corresponding v_store overload for @p _Tvec.
template<typename _Tp, typename _Tvec>
static inline void vx_store(_Tp* dst, const _Tvec& v)
{
    v_store(dst, v);
}
|
|
|
|
|
/// Width-agnostic aligned store: writes all lanes of @p v to @p dst by
/// forwarding to the corresponding v_store_aligned overload for @p _Tvec.
template<typename _Tp, typename _Tvec>
static inline void vx_store_aligned(_Tp* dst, const _Tvec& v)
{
    v_store_aligned(dst, v);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#undef VXPREFIX |
|
|
} |
|
|
|
|
|
|
|
|
#ifndef CV_SIMD_FP16 |
|
|
#define CV_SIMD_FP16 0 |
|
|
#endif |
|
|
|
|
|
#ifndef CV_SIMD |
|
|
#define CV_SIMD 0 |
|
|
#endif |
|
|
|
|
|
#include "simd_utils.impl.hpp" |
|
|
|
|
|
#ifndef CV_DOXYGEN |
|
|
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END |
|
|
#endif |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
#if defined(__GNUC__) && __GNUC__ == 12 |
|
|
#pragma GCC diagnostic pop |
|
|
#endif |
|
|
|
|
|
#endif |
|
|
|